/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
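/*
 * mtodoff(m, t, off): like mtod(m, t), but returns a pointer to the
 * mbuf data at byte offset 'off'.
 */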
#define mtodoff(m, t, off)	((t)((m)->m_data + (off)))

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2, IWM_RATE_1M_PLCP },
	{   4, IWM_RATE_2M_PLCP },
	{  11, IWM_RATE_5M_PLCP },
	{  22, IWM_RATE_11M_PLCP },
	{  12, IWM_RATE_6M_PLCP },
	{  18, IWM_RATE_9M_PLCP },
	{  24, IWM_RATE_12M_PLCP },
	{  36, IWM_RATE_18M_PLCP },
	{  48, IWM_RATE_24M_PLCP },
	{  72, IWM_RATE_36M_PLCP },
	{  96, IWM_RATE_48M_PLCP },
	{ 108, IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *,
		    const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
		    const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
		    struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
		    const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
		    bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
		    const struct iwm_fw_img *, int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
		    const struct iwm_fw_img *, int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
		    const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
		    const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		    const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int,
		    enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */
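/*
 * The firmware image is a TLV container: a struct iwm_tlv_ucode_header
 * followed by a sequence of (type, length, value) records.  The helpers
 * below each digest one record type.
 */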

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

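/*
 * Load the firmware image via firmware(9), validate the ucode header,
 * and walk the TLV records, dispatching each to the helpers above.
 * On any parse error the firmware reference is released again.
 */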
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * the current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
				    TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
				    TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
				    TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
			    IWM_FW_PHY_CFG_TX_CHAIN) >>
			    IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
			    IWM_FW_PHY_CFG_RX_CHAIN) >>
			    IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
				    "%s: Paging: driver supports up to %u bytes for paging image\n",
				    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */
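/*
 * Summary of the DMA alignment requirements used below: firmware load
 * buffer 16 bytes, TX scheduler byte count tables 1KB, keep-warm page
 * 4KB, ICT table (1 << IWM_ICT_PADDR_SHIFT) bytes, RX/TX descriptor
 * arrays 256 bytes, RX status area 16 bytes.
 */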

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/*
 * tx scheduler byte count tables; the base address is handed to the
 * firmware in iwm_trans_pcie_fw_alive().
 */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1 << IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:
	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

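/*
 * Allocate a TX ring.  Each ring gets descriptor memory, but only the
 * rings up to and including IWM_CMD_QUEUE also get command buffers and
 * a data DMA tag, since the remaining rings are unused.
 */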
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:
	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

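/*
 * Initialize RX DMA through the RFH (multi-queue) registers; this path
 * is taken when sc->cfg->mqrx_supported is set.  Only queue 0 is
 * configured for now.
 */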
static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	     IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}

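/*
 * Initialize RX DMA through the legacy FH registers, for NICs without
 * multi-queue RX support.
 */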
static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 * the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |	/* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

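/*
 * Activate a TX queue in the scheduler.  The command queue is
 * configured directly via scheduler registers; all other queues are
 * configured by sending an IWM_SCD_QUEUE_CFG command to the firmware.
 */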
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

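/*
 * Finish TX scheduler setup after the "alive" notification: reset the
 * ICT table, clear the scheduler context area, point the scheduler at
 * the byte count tables, and enable the command queue and the FH TX
 * DMA channels.
 */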
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE	1
#define IWM_NVM_READ_OPCODE	0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

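/*
 * Read a single chunk of an NVM section by sending a synchronous
 * IWM_NVM_ACCESS_CMD and copying the response into the caller's
 * buffer at the requested offset.
 */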
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
    uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * the driver tried to read a chunk at an address that
			 * is a multiple of 2K and got an error because that
			 * address is empty.
			 * meaning of (offset != 0): the driver already read
			 * valid data from another chunk, so this case is
			 * not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
			    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Copy the NVM chunk into the caller's buffer. */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}

/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0 which is the EEPROM.  Because EEPROM reads are not bounded
 * by the uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as fits, without
 * overflowing, so no check is needed.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
    uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
{
	uint16_t seglen, length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWM_NVM_DEFAULT_CHUNK_SIZE;

	seglen = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		/* Check no memory assumptions fail and cause an overflow */
		if ((size_read + offset + length) >
		    sc->cfg->eeprom_size) {
			device_printf(sc->sc_dev,
			    "EEPROM size is too small for NVM\n");
			return ENOBUFS;
		}

		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
		if (ret) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "Cannot read NVM from section %d offset %d, length %d\n",
			    section, offset, length);
			return ret;
		}
		offset += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
	    "NVM section %d read completed\n", section);
	*len = offset;
	return 0;
}

/*
 * BEGIN IWM_NVM_PARSE
 */

/* iwlwifi/iwl-nvm-parse.c */

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwm_eeprom_channel_flags(uint16_t ch_flags)
{
	uint32_t nflags;

	nflags = 0;
	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* Just in case. */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return (nflags);
}

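/*
 * Add all valid channels of one band from the NVM channel list to the
 * net80211 channel array, starting at ch_idx and ending before ch_num.
 */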
1870 */ 1871 static int 1872 iwm_nvm_read_section(struct iwm_softc *sc, 1873 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read) 1874 { 1875 uint16_t seglen, length, offset = 0; 1876 int ret; 1877 1878 /* Set nvm section read length */ 1879 length = IWM_NVM_DEFAULT_CHUNK_SIZE; 1880 1881 seglen = length; 1882 1883 /* Read the NVM until exhausted (reading less than requested) */ 1884 while (seglen == length) { 1885 /* Check no memory assumptions fail and cause an overflow */ 1886 if ((size_read + offset + length) > 1887 sc->cfg->eeprom_size) { 1888 device_printf(sc->sc_dev, 1889 "EEPROM size is too small for NVM\n"); 1890 return ENOBUFS; 1891 } 1892 1893 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen); 1894 if (ret) { 1895 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1896 "Cannot read NVM from section %d offset %d, length %d\n", 1897 section, offset, length); 1898 return ret; 1899 } 1900 offset += seglen; 1901 } 1902 1903 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1904 "NVM section %d read completed\n", section); 1905 *len = offset; 1906 return 0; 1907 } 1908 1909 /* 1910 * BEGIN IWM_NVM_PARSE 1911 */ 1912 1913 /* iwlwifi/iwl-nvm-parse.c */ 1914 1915 /* 1916 * Translate EEPROM flags to net80211. 1917 */ 1918 static uint32_t 1919 iwm_eeprom_channel_flags(uint16_t ch_flags) 1920 { 1921 uint32_t nflags; 1922 1923 nflags = 0; 1924 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0) 1925 nflags |= IEEE80211_CHAN_PASSIVE; 1926 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0) 1927 nflags |= IEEE80211_CHAN_NOADHOC; 1928 if (ch_flags & IWM_NVM_CHANNEL_RADAR) { 1929 nflags |= IEEE80211_CHAN_DFS; 1930 /* Just in case. */ 1931 nflags |= IEEE80211_CHAN_NOADHOC; 1932 } 1933 1934 return (nflags); 1935 } 1936 1937 static void 1938 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[], 1939 int maxchans, int *nchans, int ch_idx, size_t ch_num, 1940 const uint8_t bands[]) 1941 { 1942 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags; 1943 uint32_t nflags; 1944 uint16_t ch_flags; 1945 uint8_t ieee; 1946 int error; 1947 1948 for (; ch_idx < ch_num; ch_idx++) { 1949 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx); 1950 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1951 ieee = iwm_nvm_channels[ch_idx]; 1952 else 1953 ieee = iwm_nvm_channels_8000[ch_idx]; 1954 1955 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) { 1956 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 1957 "Ch. %d Flags %x [%sGHz] - No traffic\n", 1958 ieee, ch_flags, 1959 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 1960 "5.2" : "2.4"); 1961 continue; 1962 } 1963 1964 nflags = iwm_eeprom_channel_flags(ch_flags); 1965 error = ieee80211_add_channel(chans, maxchans, nchans, 1966 ieee, 0, 0, nflags, bands); 1967 if (error != 0) 1968 break; 1969 1970 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 1971 "Ch. %d Flags %x [%sGHz] - Added\n", 1972 ieee, ch_flags, 1973 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 1974 "5.2" : "2.4"); 1975 } 1976 } 1977 1978 static void 1979 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans, 1980 struct ieee80211_channel chans[]) 1981 { 1982 struct iwm_softc *sc = ic->ic_softc; 1983 struct iwm_nvm_data *data = sc->nvm_data; 1984 uint8_t bands[IEEE80211_MODE_BYTES]; 1985 size_t ch_num; 1986 1987 memset(bands, 0, sizeof(bands)); 1988 /* 1-13: 11b/g channels. */ 1989 setbit(bands, IEEE80211_MODE_11B); 1990 setbit(bands, IEEE80211_MODE_11G); 1991 iwm_add_channel_band(sc, chans, maxchans, nchans, 0, 1992 IWM_NUM_2GHZ_CHANNELS - 1, bands); 1993 1994 /* 14: 11b channel only. 
*/ 1995 clrbit(bands, IEEE80211_MODE_11G); 1996 iwm_add_channel_band(sc, chans, maxchans, nchans, 1997 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands); 1998 1999 if (data->sku_cap_band_52GHz_enable) { 2000 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 2001 ch_num = nitems(iwm_nvm_channels); 2002 else 2003 ch_num = nitems(iwm_nvm_channels_8000); 2004 memset(bands, 0, sizeof(bands)); 2005 setbit(bands, IEEE80211_MODE_11A); 2006 iwm_add_channel_band(sc, chans, maxchans, nchans, 2007 IWM_NUM_2GHZ_CHANNELS, ch_num, bands); 2008 } 2009 } 2010 2011 static void 2012 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data, 2013 const uint16_t *mac_override, const uint16_t *nvm_hw) 2014 { 2015 const uint8_t *hw_addr; 2016 2017 if (mac_override) { 2018 static const uint8_t reserved_mac[] = { 2019 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 2020 }; 2021 2022 hw_addr = (const uint8_t *)(mac_override + 2023 IWM_MAC_ADDRESS_OVERRIDE_8000); 2024 2025 /* 2026 * Store the MAC address from MAO section. 2027 * No byte swapping is required in MAO section 2028 */ 2029 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr); 2030 2031 /* 2032 * Force the use of the OTP MAC address in case of reserved MAC 2033 * address in the NVM, or if address is given but invalid. 2034 */ 2035 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) && 2036 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) && 2037 iwm_is_valid_ether_addr(data->hw_addr) && 2038 !IEEE80211_IS_MULTICAST(data->hw_addr)) 2039 return; 2040 2041 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2042 "%s: mac address from nvm override section invalid\n", 2043 __func__); 2044 } 2045 2046 if (nvm_hw) { 2047 /* read the mac address from WFMP registers */ 2048 uint32_t mac_addr0 = 2049 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0)); 2050 uint32_t mac_addr1 = 2051 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1)); 2052 2053 hw_addr = (const uint8_t *)&mac_addr0; 2054 data->hw_addr[0] = hw_addr[3]; 2055 data->hw_addr[1] = hw_addr[2]; 2056 data->hw_addr[2] = hw_addr[1]; 2057 data->hw_addr[3] = hw_addr[0]; 2058 2059 hw_addr = (const uint8_t *)&mac_addr1; 2060 data->hw_addr[4] = hw_addr[1]; 2061 data->hw_addr[5] = hw_addr[0]; 2062 2063 return; 2064 } 2065 2066 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__); 2067 memset(data->hw_addr, 0, sizeof(data->hw_addr)); 2068 } 2069 2070 static int 2071 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2072 const uint16_t *phy_sku) 2073 { 2074 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) 2075 return le16_to_cpup(nvm_sw + IWM_SKU); 2076 2077 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000)); 2078 } 2079 2080 static int 2081 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2082 { 2083 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) 2084 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION); 2085 else 2086 return le32_to_cpup((const uint32_t *)(nvm_sw + 2087 IWM_NVM_VERSION_8000)); 2088 } 2089 2090 static int 2091 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2092 const uint16_t *phy_sku) 2093 { 2094 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) 2095 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG); 2096 2097 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000)); 2098 } 2099 2100 static int 2101 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2102 { 2103 int n_hw_addr; 2104 2105 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) 2106 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); 2107 
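	/*
	 * On family 8000 the count is carried in a 32-bit word and only
	 * the bits covered by IWM_N_HW_ADDR_MASK hold the address count.
	 */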
2108 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000)); 2109 2110 return n_hw_addr & IWM_N_HW_ADDR_MASK; 2111 } 2112 2113 static void 2114 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data, 2115 uint32_t radio_cfg) 2116 { 2117 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) { 2118 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg); 2119 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg); 2120 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg); 2121 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg); 2122 return; 2123 } 2124 2125 /* set the radio configuration for family 8000 */ 2126 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg); 2127 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg); 2128 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg); 2129 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg); 2130 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg); 2131 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg); 2132 } 2133 2134 static int 2135 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data, 2136 const uint16_t *nvm_hw, const uint16_t *mac_override) 2137 { 2138 #ifdef notyet /* for FAMILY 9000 */ 2139 if (cfg->mac_addr_from_csr) { 2140 iwm_set_hw_address_from_csr(sc, data); 2141 } else 2142 #endif 2143 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) { 2144 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR); 2145 2146 /* The byte order is little endian 16 bit, meaning 214365 */ 2147 data->hw_addr[0] = hw_addr[1]; 2148 data->hw_addr[1] = hw_addr[0]; 2149 data->hw_addr[2] = hw_addr[3]; 2150 data->hw_addr[3] = hw_addr[2]; 2151 data->hw_addr[4] = hw_addr[5]; 2152 data->hw_addr[5] = hw_addr[4]; 2153 } else { 2154 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw); 2155 } 2156 2157 if (!iwm_is_valid_ether_addr(data->hw_addr)) { 2158 device_printf(sc->sc_dev, "no valid mac address was found\n"); 2159 return EINVAL; 2160 } 2161 2162 return 0; 2163 } 2164 2165 static struct iwm_nvm_data * 2166 iwm_parse_nvm_data(struct iwm_softc *sc, 2167 const uint16_t *nvm_hw, const uint16_t *nvm_sw, 2168 const uint16_t *nvm_calib, const uint16_t *mac_override, 2169 const uint16_t *phy_sku, const uint16_t *regulatory) 2170 { 2171 struct iwm_nvm_data *data; 2172 uint32_t sku, radio_cfg; 2173 uint16_t lar_config; 2174 2175 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) { 2176 data = malloc(sizeof(*data) + 2177 IWM_NUM_CHANNELS * sizeof(uint16_t), 2178 M_DEVBUF, M_NOWAIT | M_ZERO); 2179 } else { 2180 data = malloc(sizeof(*data) + 2181 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t), 2182 M_DEVBUF, M_NOWAIT | M_ZERO); 2183 } 2184 if (!data) 2185 return NULL; 2186 2187 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw); 2188 2189 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku); 2190 iwm_set_radio_cfg(sc, data, radio_cfg); 2191 2192 sku = iwm_get_sku(sc, nvm_sw, phy_sku); 2193 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ; 2194 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ; 2195 data->sku_cap_11n_enable = 0; 2196 2197 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw); 2198 2199 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) { 2200 /* TODO: use IWL_NVM_EXT */ 2201 uint16_t lar_offset = data->nvm_version < 0xE39 ? 
IWM_NVM_LAR_OFFSET_8000_OLD : 2203 IWM_NVM_LAR_OFFSET_8000; 2204 2205 lar_config = le16_to_cpup(regulatory + lar_offset); 2206 data->lar_enabled = !!(lar_config & 2207 IWM_NVM_LAR_ENABLED_8000); 2208 } 2209 2210 /* If no valid mac address was found - bail out */ 2211 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) { 2212 free(data, M_DEVBUF); 2213 return NULL; 2214 } 2215 2216 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2217 memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ? 2218 &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS], 2219 IWM_NUM_CHANNELS * sizeof(uint16_t)); 2220 } else { 2221 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000], 2222 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t)); 2223 } 2224 2225 return data; 2226 } 2227 2228 static void 2229 iwm_free_nvm_data(struct iwm_nvm_data *data) 2230 { 2231 if (data != NULL) 2232 free(data, M_DEVBUF); 2233 } 2234 2235 static struct iwm_nvm_data * 2236 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) 2237 { 2238 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 2239 2240 /* Checking for required sections */ 2241 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2242 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2243 !sections[sc->cfg->nvm_hw_section_num].data) { 2244 device_printf(sc->sc_dev, 2245 "Can't parse empty OTP/NVM sections\n"); 2246 return NULL; 2247 } 2248 } else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) { 2249 /* SW and REGULATORY sections are mandatory */ 2250 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2251 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { 2252 device_printf(sc->sc_dev, 2253 "Can't parse empty OTP/NVM sections\n"); 2254 return NULL; 2255 } 2256 /* MAC_OVERRIDE or at least HW section must exist */ 2257 if (!sections[sc->cfg->nvm_hw_section_num].data && 2258 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) { 2259 device_printf(sc->sc_dev, 2260 "Can't parse mac_address, empty sections\n"); 2261 return NULL; 2262 } 2263 2264 /* PHY_SKU section is mandatory in B0 */ 2265 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) { 2266 device_printf(sc->sc_dev, 2267 "Can't parse phy_sku in B0, empty sections\n"); 2268 return NULL; 2269 } 2270 } else { 2271 panic("unknown device family %d\n", sc->cfg->device_family); 2272 } 2273 2274 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data; 2275 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data; 2276 calib = (const uint16_t *) 2277 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data; 2278 regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2279 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data : 2280 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data; 2281 mac_override = (const uint16_t *) 2282 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data; 2283 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data; 2284 2285 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, 2286 phy_sku, regulatory); 2287 } 2288 2289 static int 2290 iwm_nvm_init(struct iwm_softc *sc) 2291 { 2292 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS]; 2293 int i, ret, section; 2294 uint32_t size_read = 0; 2295 uint8_t *nvm_buffer, *temp; 2296 uint16_t len; 2297 2298 memset(nvm_sections, 0, sizeof(nvm_sections)); 2299 2300 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS) 2301 return EINVAL; 2302 2303 /* load NVM values from nic */ 2304 /* Read From FW NVM */ 2305 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n"); 2306 2307 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO); 2308 if (!nvm_buffer) 2309 return ENOMEM; 2310 for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) { 2311 /* we override the constness for initial read */ 2312 ret = iwm_nvm_read_section(sc, section, nvm_buffer, 2313 &len, size_read); 2314 if (ret) 2315 continue; 2316 size_read += len; 2317 temp = malloc(len, M_DEVBUF, M_NOWAIT); 2318 if (!temp) { 2319 ret = ENOMEM; 2320 break; 2321 } 2322 memcpy(temp, nvm_buffer, len); 2323 2324 nvm_sections[section].data = temp; 2325 nvm_sections[section].length = len; 2326 } 2327 if (!size_read) 2328 device_printf(sc->sc_dev, "OTP is blank\n"); 2329 free(nvm_buffer, M_DEVBUF); 2330 2331 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections); 2332 if (!sc->nvm_data) 2333 return EINVAL; 2334 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 2335 "nvm version = %x\n", sc->nvm_data->nvm_version); 2336 2337 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) { 2338 if (nvm_sections[i].data != NULL) 2339 free(nvm_sections[i].data, M_DEVBUF); 2340 } 2341 2342 return 0; 2343 } 2344 2345 static int 2346 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num, 2347 const struct iwm_fw_desc *section) 2348 { 2349 struct iwm_dma_info *dma = &sc->fw_dma; 2350 uint8_t *v_addr; 2351 bus_addr_t p_addr; 2352 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len); 2353 int ret = 0; 2354 2355 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2356 "%s: [%d] uCode section being loaded...\n", 2357 __func__, section_num); 2358 2359 v_addr = dma->vaddr; 2360 p_addr = dma->paddr; 2361 2362 for (offset = 0; offset < section->len; offset += chunk_sz) { 2363 uint32_t copy_size, dst_addr; 2364 int extended_addr = FALSE; 2365 2366 copy_size = MIN(chunk_sz, section->len - offset); 2367 dst_addr = section->offset + offset; 2368 2369 if (dst_addr >= IWM_FW_MEM_EXTENDED_START && 2370 dst_addr <= IWM_FW_MEM_EXTENDED_END) 2371 extended_addr = TRUE; 2372 2373 if (extended_addr) 2374 iwm_set_bits_prph(sc, IWM_LMPM_CHICK, 2375 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2376 2377 memcpy(v_addr, (const uint8_t *)section->data + offset, 2378 copy_size); 2379 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 2380 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr, 2381 copy_size); 2382 2383 if (extended_addr) 2384 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK, 2385 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2386 2387 if (ret) { 2388 device_printf(sc->sc_dev, 2389 "%s: Could not load the [%d] uCode section\n", 2390 __func__, section_num); 2391 break; 2392 } 2393 } 2394 2395 return 
ret; 2396 } 2397 2398 /* 2399 * ucode 2400 */ 2401 static int 2402 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr, 2403 bus_addr_t phy_addr, uint32_t byte_cnt) 2404 { 2405 sc->sc_fw_chunk_done = 0; 2406 2407 if (!iwm_nic_lock(sc)) 2408 return EBUSY; 2409 2410 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2411 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 2412 2413 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL), 2414 dst_addr); 2415 2416 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL), 2417 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 2418 2419 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL), 2420 (iwm_get_dma_hi_addr(phy_addr) 2421 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 2422 2423 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL), 2424 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 2425 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 2426 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 2427 2428 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2429 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 2430 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 2431 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 2432 2433 iwm_nic_unlock(sc); 2434 2435 /* wait up to 5s for this segment to load */ 2436 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5); 2437 2438 if (!sc->sc_fw_chunk_done) { 2439 device_printf(sc->sc_dev, 2440 "fw chunk addr 0x%x len %d failed to load\n", 2441 dst_addr, byte_cnt); 2442 return ETIMEDOUT; 2443 } 2444 2445 return 0; 2446 } 2447 2448 static int 2449 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, 2450 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2451 { 2452 int shift_param; 2453 int i, ret = 0, sec_num = 0x1; 2454 uint32_t val, last_read_idx = 0; 2455 2456 if (cpu == 1) { 2457 shift_param = 0; 2458 *first_ucode_section = 0; 2459 } else { 2460 shift_param = 16; 2461 (*first_ucode_section)++; 2462 } 2463 2464 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2465 last_read_idx = i; 2466 2467 /* 2468 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2469 * CPU1 to CPU2. 2470 * PAGING_SEPARATOR_SECTION delimiter - separate between 2471 * CPU2 non paged to CPU2 paging sec. 
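	 * Both separators are sentinel values placed in the section's
	 * offset field rather than real load addresses, so hitting one
	 * (or an empty slot) ends the current CPU's section list.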
2472 */ 2473 if (!image->sec[i].data || 2474 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2475 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2476 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2477 "Break since Data not valid or Empty section, sec = %d\n", 2478 i); 2479 break; 2480 } 2481 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2482 if (ret) 2483 return ret; 2484 2485 /* Notify the ucode of the loaded section number and status */ 2486 if (iwm_nic_lock(sc)) { 2487 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS); 2488 val = val | (sec_num << shift_param); 2489 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val); 2490 sec_num = (sec_num << 1) | 0x1; 2491 iwm_nic_unlock(sc); 2492 } 2493 } 2494 2495 *first_ucode_section = last_read_idx; 2496 2497 iwm_enable_interrupts(sc); 2498 2499 if (iwm_nic_lock(sc)) { 2500 if (cpu == 1) 2501 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF); 2502 else 2503 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 2504 iwm_nic_unlock(sc); 2505 } 2506 2507 return 0; 2508 } 2509 2510 static int 2511 iwm_pcie_load_cpu_sections(struct iwm_softc *sc, 2512 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2513 { 2514 int i, ret = 0; 2515 uint32_t last_read_idx = 0; 2516 2517 if (cpu == 1) { 2518 *first_ucode_section = 0; 2519 } else { 2520 (*first_ucode_section)++; 2521 } 2522 2523 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2524 last_read_idx = i; 2525 2526 /* 2527 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2528 * CPU1 to CPU2. 2529 * PAGING_SEPARATOR_SECTION delimiter - separate between 2530 * CPU2 non paged to CPU2 paging sec. 2531 */ 2532 if (!image->sec[i].data || 2533 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2534 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2535 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2536 "Break since Data not valid or Empty section, sec = %d\n", 2537 i); 2538 break; 2539 } 2540 2541 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2542 if (ret) 2543 return ret; 2544 } 2545 2546 *first_ucode_section = last_read_idx; 2547 2548 return 0; 2549 2550 } 2551 2552 static int 2553 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image) 2554 { 2555 int ret = 0; 2556 int first_ucode_section; 2557 2558 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2559 image->is_dual_cpus ? "Dual" : "Single"); 2560 2561 /* load to FW the binary non secured sections of CPU1 */ 2562 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section); 2563 if (ret) 2564 return ret; 2565 2566 if (image->is_dual_cpus) { 2567 /* set CPU2 header address */ 2568 if (iwm_nic_lock(sc)) { 2569 iwm_write_prph(sc, 2570 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 2571 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE); 2572 iwm_nic_unlock(sc); 2573 } 2574 2575 /* load to FW the binary sections of CPU2 */ 2576 ret = iwm_pcie_load_cpu_sections(sc, image, 2, 2577 &first_ucode_section); 2578 if (ret) 2579 return ret; 2580 } 2581 2582 iwm_enable_interrupts(sc); 2583 2584 /* release CPU reset */ 2585 IWM_WRITE(sc, IWM_CSR_RESET, 0); 2586 2587 return 0; 2588 } 2589 2590 int 2591 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc, 2592 const struct iwm_fw_img *image) 2593 { 2594 int ret = 0; 2595 int first_ucode_section; 2596 2597 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2598 image->is_dual_cpus ? 
"Dual" : "Single"); 2599 2600 /* configure the ucode to be ready to get the secured image */ 2601 /* release CPU reset */ 2602 if (iwm_nic_lock(sc)) { 2603 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, 2604 IWM_RELEASE_CPU_RESET_BIT); 2605 iwm_nic_unlock(sc); 2606 } 2607 2608 /* load to FW the binary Secured sections of CPU1 */ 2609 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1, 2610 &first_ucode_section); 2611 if (ret) 2612 return ret; 2613 2614 /* load to FW the binary sections of CPU2 */ 2615 return iwm_pcie_load_cpu_sections_8000(sc, image, 2, 2616 &first_ucode_section); 2617 } 2618 2619 /* XXX Get rid of this definition */ 2620 static inline void 2621 iwm_enable_fw_load_int(struct iwm_softc *sc) 2622 { 2623 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n"); 2624 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX; 2625 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 2626 } 2627 2628 /* XXX Add proper rfkill support code */ 2629 static int 2630 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw) 2631 { 2632 int ret; 2633 2634 /* This may fail if AMT took ownership of the device */ 2635 if (iwm_prepare_card_hw(sc)) { 2636 device_printf(sc->sc_dev, 2637 "%s: Exit HW not ready\n", __func__); 2638 ret = EIO; 2639 goto out; 2640 } 2641 2642 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2643 2644 iwm_disable_interrupts(sc); 2645 2646 /* make sure rfkill handshake bits are cleared */ 2647 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2648 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, 2649 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2650 2651 /* clear (again), then enable host interrupts */ 2652 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2653 2654 ret = iwm_nic_init(sc); 2655 if (ret) { 2656 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__); 2657 goto out; 2658 } 2659 2660 /* 2661 * Now, we load the firmware and don't want to be interrupted, even 2662 * by the RF-Kill interrupt (hence mask all the interrupt besides the 2663 * FH_TX interrupt which is needed to load the firmware). If the 2664 * RF-Kill switch is toggled, we will find out after having loaded 2665 * the firmware and return the proper value to the caller. 2666 */ 2667 iwm_enable_fw_load_int(sc); 2668 2669 /* really make sure rfkill handshake bits are cleared */ 2670 /* maybe we should write a few times more? 
just to make sure */ 2671 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2672 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2673 2674 /* Load the given image to the HW */ 2675 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) 2676 ret = iwm_pcie_load_given_ucode_8000(sc, fw); 2677 else 2678 ret = iwm_pcie_load_given_ucode(sc, fw); 2679 2680 /* XXX re-check RF-Kill state */ 2681 2682 out: 2683 return ret; 2684 } 2685 2686 static int 2687 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant) 2688 { 2689 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = { 2690 .valid = htole32(valid_tx_ant), 2691 }; 2692 2693 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 2694 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd); 2695 } 2696 2697 /* iwlwifi: mvm/fw.c */ 2698 static int 2699 iwm_send_phy_cfg_cmd(struct iwm_softc *sc) 2700 { 2701 struct iwm_phy_cfg_cmd phy_cfg_cmd; 2702 enum iwm_ucode_type ucode_type = sc->cur_ucode; 2703 2704 /* Set parameters */ 2705 phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc)); 2706 phy_cfg_cmd.calib_control.event_trigger = 2707 sc->sc_default_calib[ucode_type].event_trigger; 2708 phy_cfg_cmd.calib_control.flow_trigger = 2709 sc->sc_default_calib[ucode_type].flow_trigger; 2710 2711 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET, 2712 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg); 2713 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC, 2714 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 2715 } 2716 2717 static int 2718 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data) 2719 { 2720 struct iwm_alive_data *alive_data = data; 2721 struct iwm_alive_resp_v3 *palive3; 2722 struct iwm_alive_resp *palive; 2723 struct iwm_umac_alive *umac; 2724 struct iwm_lmac_alive *lmac1; 2725 struct iwm_lmac_alive *lmac2 = NULL; 2726 uint16_t status; 2727 2728 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) { 2729 palive = (void *)pkt->data; 2730 umac = &palive->umac_data; 2731 lmac1 = &palive->lmac_data[0]; 2732 lmac2 = &palive->lmac_data[1]; 2733 status = le16toh(palive->status); 2734 } else { 2735 palive3 = (void *)pkt->data; 2736 umac = &palive3->umac_data; 2737 lmac1 = &palive3->lmac_data; 2738 status = le16toh(palive3->status); 2739 } 2740 2741 sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr); 2742 if (lmac2) 2743 sc->error_event_table[1] = 2744 le32toh(lmac2->error_event_table_ptr); 2745 sc->log_event_table = le32toh(lmac1->log_event_table_ptr); 2746 sc->umac_error_event_table = le32toh(umac->error_info_addr); 2747 alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr); 2748 alive_data->valid = status == IWM_ALIVE_STATUS_OK; 2749 if (sc->umac_error_event_table) 2750 sc->support_umac_log = TRUE; 2751 2752 IWM_DPRINTF(sc, IWM_DEBUG_FW, 2753 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", 2754 status, lmac1->ver_type, lmac1->ver_subtype); 2755 2756 if (lmac2) 2757 IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n"); 2758 2759 IWM_DPRINTF(sc, IWM_DEBUG_FW, 2760 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 2761 le32toh(umac->umac_major), 2762 le32toh(umac->umac_minor)); 2763 2764 return TRUE; 2765 } 2766 2767 static int 2768 iwm_wait_phy_db_entry(struct iwm_softc *sc, 2769 struct iwm_rx_packet *pkt, void *data) 2770 { 2771 struct iwm_phy_db *phy_db = data; 2772 2773 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) { 2774 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) { 2775 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n", 2776 __func__, pkt->hdr.code); 2777 } 
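		/*
		 * Any other notification, normally IWM_INIT_COMPLETE_NOTIF,
		 * finishes the wait.
		 */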
2778 return TRUE; 2779 } 2780 2781 if (iwm_phy_db_set_section(phy_db, pkt)) { 2782 device_printf(sc->sc_dev, 2783 "%s: iwm_phy_db_set_section failed\n", __func__); 2784 } 2785 2786 return FALSE; 2787 } 2788 2789 static int 2790 iwm_load_ucode_wait_alive(struct iwm_softc *sc, 2791 enum iwm_ucode_type ucode_type) 2792 { 2793 struct iwm_notification_wait alive_wait; 2794 struct iwm_alive_data alive_data; 2795 const struct iwm_fw_img *fw; 2796 enum iwm_ucode_type old_type = sc->cur_ucode; 2797 int error; 2798 static const uint16_t alive_cmd[] = { IWM_ALIVE }; 2799 2800 fw = &sc->sc_fw.img[ucode_type]; 2801 sc->cur_ucode = ucode_type; 2802 sc->ucode_loaded = FALSE; 2803 2804 memset(&alive_data, 0, sizeof(alive_data)); 2805 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait, 2806 alive_cmd, nitems(alive_cmd), 2807 iwm_alive_fn, &alive_data); 2808 2809 error = iwm_start_fw(sc, fw); 2810 if (error) { 2811 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error); 2812 sc->cur_ucode = old_type; 2813 iwm_remove_notification(sc->sc_notif_wait, &alive_wait); 2814 return error; 2815 } 2816 2817 /* 2818 * Some things may run in the background now, but we 2819 * just wait for the ALIVE notification here. 2820 */ 2821 IWM_UNLOCK(sc); 2822 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait, 2823 IWM_UCODE_ALIVE_TIMEOUT); 2824 IWM_LOCK(sc); 2825 if (error) { 2826 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) { 2827 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a; 2828 if (iwm_nic_lock(sc)) { 2829 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS); 2830 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS); 2831 iwm_nic_unlock(sc); 2832 } 2833 device_printf(sc->sc_dev, 2834 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 2835 a, b); 2836 } 2837 sc->cur_ucode = old_type; 2838 return error; 2839 } 2840 2841 if (!alive_data.valid) { 2842 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n", 2843 __func__); 2844 sc->cur_ucode = old_type; 2845 return EIO; 2846 } 2847 2848 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr); 2849 2850 /* 2851 * configure and operate fw paging mechanism. 2852 * driver configures the paging flow only once, CPU2 paging image 2853 * included in the IWM_UCODE_INIT image. 
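 * (Paging allows a firmware image that is larger than the device's
 * SRAM: secondary code pages stay in host DRAM and are fetched by
 * the firmware on demand.)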
2854 */ 2855 if (fw->paging_mem_size) { 2856 error = iwm_save_fw_paging(sc, fw); 2857 if (error) { 2858 device_printf(sc->sc_dev, 2859 "%s: failed to save the FW paging image\n", 2860 __func__); 2861 return error; 2862 } 2863 2864 error = iwm_send_paging_cmd(sc, fw); 2865 if (error) { 2866 device_printf(sc->sc_dev, 2867 "%s: failed to send the paging cmd\n", __func__); 2868 iwm_free_fw_paging(sc); 2869 return error; 2870 } 2871 } 2872 2873 if (!error) 2874 sc->ucode_loaded = TRUE; 2875 return error; 2876 } 2877 2878 /* 2879 * mvm misc bits 2880 */ 2881 2882 /* 2883 * follows iwlwifi/fw.c 2884 */ 2885 static int 2886 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm) 2887 { 2888 struct iwm_notification_wait calib_wait; 2889 static const uint16_t init_complete[] = { 2890 IWM_INIT_COMPLETE_NOTIF, 2891 IWM_CALIB_RES_NOTIF_PHY_DB 2892 }; 2893 int ret; 2894 2895 /* do not operate with rfkill switch turned on */ 2896 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) { 2897 device_printf(sc->sc_dev, 2898 "radio is disabled by hardware switch\n"); 2899 return EPERM; 2900 } 2901 2902 iwm_init_notification_wait(sc->sc_notif_wait, 2903 &calib_wait, 2904 init_complete, 2905 nitems(init_complete), 2906 iwm_wait_phy_db_entry, 2907 sc->sc_phy_db); 2908 2909 /* Will also start the device */ 2910 ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT); 2911 if (ret) { 2912 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n", 2913 ret); 2914 goto error; 2915 } 2916 2917 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) { 2918 ret = iwm_send_bt_init_conf(sc); 2919 if (ret) { 2920 device_printf(sc->sc_dev, 2921 "failed to send bt coex configuration: %d\n", ret); 2922 goto error; 2923 } 2924 } 2925 2926 if (justnvm) { 2927 /* Read nvm */ 2928 ret = iwm_nvm_init(sc); 2929 if (ret) { 2930 device_printf(sc->sc_dev, "failed to read nvm\n"); 2931 goto error; 2932 } 2933 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr); 2934 goto error; 2935 } 2936 2937 /* Send TX valid antennas before triggering calibrations */ 2938 ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc)); 2939 if (ret) { 2940 device_printf(sc->sc_dev, 2941 "failed to send antennas before calibration: %d\n", ret); 2942 goto error; 2943 } 2944 2945 /* 2946 * Send phy configurations command to init uCode 2947 * to start the 16.0 uCode init image internal calibrations. 2948 */ 2949 ret = iwm_send_phy_cfg_cmd(sc); 2950 if (ret) { 2951 device_printf(sc->sc_dev, 2952 "%s: Failed to run INIT calibrations: %d\n", 2953 __func__, ret); 2954 goto error; 2955 } 2956 2957 /* 2958 * Nothing to do but wait for the init complete notification 2959 * from the firmware. 
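 * While we sleep here, iwm_wait_phy_db_entry() keeps storing
 * IWM_CALIB_RES_NOTIF_PHY_DB sections and only completes the wait
 * once IWM_INIT_COMPLETE_NOTIF arrives.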
2960 */ 2961 IWM_UNLOCK(sc); 2962 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait, 2963 IWM_UCODE_CALIB_TIMEOUT); 2964 IWM_LOCK(sc); 2965 2966 2967 goto out; 2968 2969 error: 2970 iwm_remove_notification(sc->sc_notif_wait, &calib_wait); 2971 out: 2972 return ret; 2973 } 2974 2975 static int 2976 iwm_config_ltr(struct iwm_softc *sc) 2977 { 2978 struct iwm_ltr_config_cmd cmd = { 2979 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE), 2980 }; 2981 2982 if (!sc->sc_ltr_enabled) 2983 return 0; 2984 2985 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd); 2986 } 2987 2988 /* 2989 * receive side 2990 */ 2991 2992 /* (re)stock rx ring, called at init-time and at runtime */ 2993 static int 2994 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) 2995 { 2996 struct iwm_rx_ring *ring = &sc->rxq; 2997 struct iwm_rx_data *data = &ring->data[idx]; 2998 struct mbuf *m; 2999 bus_dmamap_t dmamap; 3000 bus_dma_segment_t seg; 3001 int nsegs, error; 3002 3003 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE); 3004 if (m == NULL) 3005 return ENOBUFS; 3006 3007 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3008 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m, 3009 &seg, &nsegs, BUS_DMA_NOWAIT); 3010 if (error != 0) { 3011 device_printf(sc->sc_dev, 3012 "%s: can't map mbuf, error %d\n", __func__, error); 3013 m_freem(m); 3014 return error; 3015 } 3016 3017 if (data->m != NULL) 3018 bus_dmamap_unload(ring->data_dmat, data->map); 3019 3020 /* Swap ring->spare_map with data->map */ 3021 dmamap = data->map; 3022 data->map = ring->spare_map; 3023 ring->spare_map = dmamap; 3024 3025 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 3026 data->m = m; 3027 3028 /* Update RX descriptor. */ 3029 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned")); 3030 if (sc->cfg->mqrx_supported) 3031 ((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr); 3032 else 3033 ((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8); 3034 bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map, 3035 BUS_DMASYNC_PREWRITE); 3036 3037 return 0; 3038 } 3039 3040 static void 3041 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3042 { 3043 struct iwm_rx_phy_info *phy_info = (void *)pkt->data; 3044 3045 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n"); 3046 3047 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 3048 } 3049 3050 /* 3051 * Retrieve the average noise (in dBm) among receivers. 3052 */ 3053 static int 3054 iwm_get_noise(struct iwm_softc *sc, 3055 const struct iwm_statistics_rx_non_phy *stats) 3056 { 3057 int i, noise; 3058 #ifdef IWM_DEBUG 3059 int nbant, total; 3060 #else 3061 int nbant __unused, total __unused; 3062 #endif 3063 3064 total = nbant = noise = 0; 3065 for (i = 0; i < 3; i++) { 3066 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 3067 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n", 3068 __func__, 3069 i, 3070 noise); 3071 3072 if (noise) { 3073 total += noise; 3074 nbant++; 3075 } 3076 } 3077 3078 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n", 3079 __func__, nbant, total); 3080 #if 0 3081 /* There should be at least one antenna but check anyway. */ 3082 return (nbant == 0) ? 
-127 : (total / nbant) - 107; 3083 #else 3084 /* For now, just hard-code it to -96 to be safe */ 3085 return (-96); 3086 #endif 3087 } 3088 3089 static void 3090 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3091 { 3092 struct iwm_notif_statistics *stats = (void *)&pkt->data; 3093 3094 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats)); 3095 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general); 3096 } 3097 3098 /* iwlwifi: mvm/rx.c */ 3099 /* 3100 * iwm_get_signal_strength - use new rx PHY INFO API 3101 * values are reported by the fw as positive values - need to negate 3102 * to obtain their dBM. Account for missing antennas by replacing 0 3103 * values by -256dBm: practically 0 power and a non-feasible 8 bit value. 3104 */ 3105 static int 3106 iwm_rx_get_signal_strength(struct iwm_softc *sc, 3107 struct iwm_rx_phy_info *phy_info) 3108 { 3109 int energy_a, energy_b, energy_c, max_energy; 3110 uint32_t val; 3111 3112 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]); 3113 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >> 3114 IWM_RX_INFO_ENERGY_ANT_A_POS; 3115 energy_a = energy_a ? -energy_a : -256; 3116 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >> 3117 IWM_RX_INFO_ENERGY_ANT_B_POS; 3118 energy_b = energy_b ? -energy_b : -256; 3119 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >> 3120 IWM_RX_INFO_ENERGY_ANT_C_POS; 3121 energy_c = energy_c ? -energy_c : -256; 3122 max_energy = MAX(energy_a, energy_b); 3123 max_energy = MAX(max_energy, energy_c); 3124 3125 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3126 "energy In A %d B %d C %d , and max %d\n", 3127 energy_a, energy_b, energy_c, max_energy); 3128 3129 return max_energy; 3130 } 3131 3132 static int 3133 iwm_rxmq_get_signal_strength(struct iwm_softc *sc, 3134 struct iwm_rx_mpdu_desc *desc) 3135 { 3136 int energy_a, energy_b; 3137 3138 energy_a = desc->v1.energy_a; 3139 energy_b = desc->v1.energy_b; 3140 energy_a = energy_a ? -energy_a : -256; 3141 energy_b = energy_b ? 
-energy_b : -256; 3142 return MAX(energy_a, energy_b); 3143 } 3144 3145 /* 3146 * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler 3147 * 3148 * Handles the actual data of the Rx packet from the fw 3149 */ 3150 static bool 3151 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, 3152 bool stolen) 3153 { 3154 struct ieee80211com *ic = &sc->sc_ic; 3155 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3156 struct ieee80211_rx_stats rxs; 3157 struct iwm_rx_phy_info *phy_info; 3158 struct iwm_rx_mpdu_res_start *rx_res; 3159 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset); 3160 uint32_t len; 3161 uint32_t rx_pkt_status; 3162 int rssi; 3163 3164 phy_info = &sc->sc_last_phy_info; 3165 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data; 3166 len = le16toh(rx_res->byte_count); 3167 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len)); 3168 3169 if (__predict_false(phy_info->cfg_phy_cnt > 20)) { 3170 device_printf(sc->sc_dev, 3171 "dsp size out of range [0,20]: %d\n", 3172 phy_info->cfg_phy_cnt); 3173 return false; 3174 } 3175 3176 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) || 3177 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) { 3178 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3179 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); 3180 return false; 3181 } 3182 3183 rssi = iwm_rx_get_signal_strength(sc, phy_info); 3184 3185 /* Map it to relative value */ 3186 rssi = rssi - sc->sc_noise; 3187 3188 /* replenish ring for the buffer we're going to feed to the sharks */ 3189 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) { 3190 device_printf(sc->sc_dev, "%s: unable to add more buffers\n", 3191 __func__); 3192 return false; 3193 } 3194 3195 m->m_data = pkt->data + sizeof(*rx_res); 3196 m->m_pkthdr.len = m->m_len = len; 3197 3198 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3199 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise); 3200 3201 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3202 "%s: phy_info: channel=%d, flags=0x%08x\n", 3203 __func__, 3204 le16toh(phy_info->channel), 3205 le16toh(phy_info->phy_flags)); 3206 3207 /* 3208 * Populate an RX state struct with the provided information. 3209 */ 3210 bzero(&rxs, sizeof(rxs)); 3211 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 3212 rxs.r_flags |= IEEE80211_R_BAND; 3213 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 3214 rxs.c_ieee = le16toh(phy_info->channel); 3215 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) { 3216 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ); 3217 rxs.c_band = IEEE80211_CHAN_2GHZ; 3218 } else { 3219 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ); 3220 rxs.c_band = IEEE80211_CHAN_5GHZ; 3221 } 3222 3223 /* rssi is in 1/2db units */ 3224 rxs.c_rssi = rssi * 2; 3225 rxs.c_nf = sc->sc_noise; 3226 if (ieee80211_add_rx_params(m, &rxs) == 0) 3227 return false; 3228 3229 if (ieee80211_radiotap_active_vap(vap)) { 3230 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; 3231 3232 tap->wr_flags = 0; 3233 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE)) 3234 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3235 tap->wr_chan_freq = htole16(rxs.c_freq); 3236 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */ 3237 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 3238 tap->wr_dbm_antsignal = (int8_t)rssi; 3239 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 3240 tap->wr_tsft = phy_info->system_timestamp; 3241 switch (phy_info->rate) { 3242 /* CCK rates. 
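 * The firmware reports CCK rates in 100 kb/s units (e.g. 10 is
 * 1 Mb/s) while radiotap's wr_rate is in 500 kb/s units (2 is
 * 1 Mb/s); the OFDM cases map PLCP rate codes the same way.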
*/ 3243 case 10: tap->wr_rate = 2; break; 3244 case 20: tap->wr_rate = 4; break; 3245 case 55: tap->wr_rate = 11; break; 3246 case 110: tap->wr_rate = 22; break; 3247 /* OFDM rates. */ 3248 case 0xd: tap->wr_rate = 12; break; 3249 case 0xf: tap->wr_rate = 18; break; 3250 case 0x5: tap->wr_rate = 24; break; 3251 case 0x7: tap->wr_rate = 36; break; 3252 case 0x9: tap->wr_rate = 48; break; 3253 case 0xb: tap->wr_rate = 72; break; 3254 case 0x1: tap->wr_rate = 96; break; 3255 case 0x3: tap->wr_rate = 108; break; 3256 /* Unknown rate: should not happen. */ 3257 default: tap->wr_rate = 0; 3258 } 3259 } 3260 3261 return true; 3262 } 3263 3264 static bool 3265 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, 3266 bool stolen) 3267 { 3268 struct ieee80211com *ic = &sc->sc_ic; 3269 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3270 struct ieee80211_frame *wh; 3271 struct ieee80211_rx_stats rxs; 3272 struct iwm_rx_mpdu_desc *desc; 3273 struct iwm_rx_packet *pkt; 3274 int rssi; 3275 uint32_t hdrlen, len, rate_n_flags; 3276 uint16_t phy_info; 3277 uint8_t channel; 3278 3279 pkt = mtodo(m, offset); 3280 desc = (void *)pkt->data; 3281 3282 if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) || 3283 !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) { 3284 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3285 "Bad CRC or FIFO: 0x%08X.\n", desc->status); 3286 return false; 3287 } 3288 3289 channel = desc->v1.channel; 3290 len = le16toh(desc->mpdu_len); 3291 phy_info = le16toh(desc->phy_info); 3292 rate_n_flags = desc->v1.rate_n_flags; 3293 3294 wh = mtodo(m, sizeof(*desc)); 3295 m->m_data = pkt->data + sizeof(*desc); 3296 m->m_pkthdr.len = m->m_len = len; 3297 3298 3299 /* Account for padding following the frame header. */ 3300 if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) { 3301 hdrlen = ieee80211_anyhdrsize(wh); 3302 memmove(mtodo(m, 2), mtodo(m, 0), hdrlen); 3303 m->m_data = mtodo(m, 2); 3304 wh = mtod(m, struct ieee80211_frame *); 3305 } 3306 3307 /* Map it to relative value */ 3308 rssi = iwm_rxmq_get_signal_strength(sc, desc); 3309 rssi = rssi - sc->sc_noise; 3310 3311 /* replenish ring for the buffer we're going to feed to the sharks */ 3312 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) { 3313 device_printf(sc->sc_dev, "%s: unable to add more buffers\n", 3314 __func__); 3315 return false; 3316 } 3317 3318 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3319 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise); 3320 3321 /* 3322 * Populate an RX state struct with the provided information. 3323 */ 3324 bzero(&rxs, sizeof(rxs)); 3325 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 3326 rxs.r_flags |= IEEE80211_R_BAND; 3327 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 3328 rxs.c_ieee = channel; 3329 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, 3330 channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ); 3331 rxs.c_band = channel <= 14 ?
IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ; 3332 3333 /* rssi is in 1/2db units */ 3334 rxs.c_rssi = rssi * 2; 3335 rxs.c_nf = sc->sc_noise; 3336 if (ieee80211_add_rx_params(m, &rxs) == 0) 3337 return false; 3338 3339 if (ieee80211_radiotap_active_vap(vap)) { 3340 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; 3341 3342 tap->wr_flags = 0; 3343 if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0) 3344 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3345 tap->wr_chan_freq = htole16(rxs.c_freq); 3346 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */ 3347 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 3348 tap->wr_dbm_antsignal = (int8_t)rssi; 3349 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 3350 tap->wr_tsft = desc->v1.gp2_on_air_rise; 3351 switch ((rate_n_flags & 0xff)) { 3352 /* CCK rates. */ 3353 case 10: tap->wr_rate = 2; break; 3354 case 20: tap->wr_rate = 4; break; 3355 case 55: tap->wr_rate = 11; break; 3356 case 110: tap->wr_rate = 22; break; 3357 /* OFDM rates. */ 3358 case 0xd: tap->wr_rate = 12; break; 3359 case 0xf: tap->wr_rate = 18; break; 3360 case 0x5: tap->wr_rate = 24; break; 3361 case 0x7: tap->wr_rate = 36; break; 3362 case 0x9: tap->wr_rate = 48; break; 3363 case 0xb: tap->wr_rate = 72; break; 3364 case 0x1: tap->wr_rate = 96; break; 3365 case 0x3: tap->wr_rate = 108; break; 3366 /* Unknown rate: should not happen. */ 3367 default: tap->wr_rate = 0; 3368 } 3369 } 3370 3371 return true; 3372 } 3373 3374 static bool 3375 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, 3376 bool stolen) 3377 { 3378 struct epoch_tracker et; 3379 struct ieee80211com *ic; 3380 struct ieee80211_frame *wh; 3381 struct ieee80211_node *ni; 3382 bool ret; 3383 3384 ic = &sc->sc_ic; 3385 3386 ret = sc->cfg->mqrx_supported ? 3387 iwm_rx_mpdu_mq(sc, m, offset, stolen) : 3388 iwm_rx_rx_mpdu(sc, m, offset, stolen); 3389 if (!ret) { 3390 counter_u64_add(ic->ic_ierrors, 1); 3391 return (ret); 3392 } 3393 3394 wh = mtod(m, struct ieee80211_frame *); 3395 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 3396 3397 IWM_UNLOCK(sc); 3398 3399 NET_EPOCH_ENTER(et); 3400 if (ni != NULL) { 3401 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m); 3402 ieee80211_input_mimo(ni, m); 3403 ieee80211_free_node(ni); 3404 } else { 3405 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m); 3406 ieee80211_input_mimo_all(ic, m); 3407 } 3408 NET_EPOCH_EXIT(et); 3409 3410 IWM_LOCK(sc); 3411 3412 return true; 3413 } 3414 3415 static int 3416 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt, 3417 struct iwm_node *in) 3418 { 3419 struct iwm_tx_resp *tx_resp = (void *)pkt->data; 3420 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; 3421 struct ieee80211_node *ni = &in->in_ni; 3422 struct ieee80211vap *vap = ni->ni_vap; 3423 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK; 3424 int new_rate, cur_rate = vap->iv_bss->ni_txrate; 3425 boolean_t rate_matched; 3426 uint8_t tx_resp_rate; 3427 3428 KASSERT(tx_resp->frame_count == 1, ("too many frames")); 3429 3430 /* Update rate control statistics. 
*/ 3431 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n", 3432 __func__, 3433 (int) le16toh(tx_resp->status.status), 3434 (int) le16toh(tx_resp->status.sequence), 3435 tx_resp->frame_count, 3436 tx_resp->bt_kill_count, 3437 tx_resp->failure_rts, 3438 tx_resp->failure_frame, 3439 le32toh(tx_resp->initial_rate), 3440 (int) le16toh(tx_resp->wireless_media_time)); 3441 3442 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate)); 3443 3444 /* For rate control, ignore frames sent at different initial rate */ 3445 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate); 3446 3447 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) { 3448 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3449 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u " 3450 "ni_txrate=%d)\n", tx_resp_rate, cur_rate); 3451 } 3452 3453 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY | 3454 IEEE80211_RATECTL_STATUS_LONG_RETRY; 3455 txs->short_retries = tx_resp->failure_rts; 3456 txs->long_retries = tx_resp->failure_frame; 3457 if (status != IWM_TX_STATUS_SUCCESS && 3458 status != IWM_TX_STATUS_DIRECT_DONE) { 3459 switch (status) { 3460 case IWM_TX_STATUS_FAIL_SHORT_LIMIT: 3461 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; 3462 break; 3463 case IWM_TX_STATUS_FAIL_LONG_LIMIT: 3464 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; 3465 break; 3466 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE: 3467 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; 3468 break; 3469 default: 3470 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; 3471 break; 3472 } 3473 } else { 3474 txs->status = IEEE80211_RATECTL_TX_SUCCESS; 3475 } 3476 3477 if (rate_matched) { 3478 ieee80211_ratectl_tx_complete(ni, txs); 3479 3480 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0); 3481 new_rate = vap->iv_bss->ni_txrate; 3482 if (new_rate != 0 && new_rate != cur_rate) { 3483 struct iwm_node *in = IWM_NODE(vap->iv_bss); 3484 iwm_setrates(sc, in, rix); 3485 iwm_send_lq_cmd(sc, &in->in_lq, FALSE); 3486 } 3487 } 3488 3489 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS); 3490 } 3491 3492 static void 3493 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3494 { 3495 struct iwm_cmd_header *cmd_hdr; 3496 struct iwm_tx_ring *ring; 3497 struct iwm_tx_data *txd; 3498 struct iwm_node *in; 3499 struct mbuf *m; 3500 int idx, qid, qmsk, status; 3501 3502 cmd_hdr = &pkt->hdr; 3503 idx = cmd_hdr->idx; 3504 qid = cmd_hdr->qid; 3505 3506 ring = &sc->txq[qid]; 3507 txd = &ring->data[idx]; 3508 in = txd->in; 3509 m = txd->m; 3510 3511 KASSERT(txd->done == 0, ("txd not done")); 3512 KASSERT(txd->in != NULL, ("txd without node")); 3513 KASSERT(txd->m != NULL, ("txd without mbuf")); 3514 3515 sc->sc_tx_timer = 0; 3516 3517 status = iwm_rx_tx_cmd_single(sc, pkt, in); 3518 3519 /* Unmap and free mbuf. */ 3520 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 3521 bus_dmamap_unload(ring->data_dmat, txd->map); 3522 3523 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3524 "free txd %p, in %p\n", txd, txd->in); 3525 txd->done = 1; 3526 txd->m = NULL; 3527 txd->in = NULL; 3528 3529 ieee80211_tx_complete(&in->in_ni, m, status); 3530 3531 qmsk = 1 << qid; 3532 if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) { 3533 sc->qfullmsk &= ~qmsk; 3534 if (sc->qfullmsk == 0) 3535 iwm_start(sc); 3536 } 3537 } 3538 3539 /* 3540 * transmit side 3541 */ 3542 3543 /* 3544 * Process a "command done" firmware notification. 
This is where we wakeup 3545 * processes waiting for a synchronous command completion. 3546 * from if_iwn 3547 */ 3548 static void 3549 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3550 { 3551 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE]; 3552 struct iwm_tx_data *data; 3553 3554 if (pkt->hdr.qid != IWM_CMD_QUEUE) { 3555 return; /* Not a command ack. */ 3556 } 3557 3558 /* XXX wide commands? */ 3559 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 3560 "cmd notification type 0x%x qid %d idx %d\n", 3561 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx); 3562 3563 data = &ring->data[pkt->hdr.idx]; 3564 3565 /* If the command was mapped in an mbuf, free it. */ 3566 if (data->m != NULL) { 3567 bus_dmamap_sync(ring->data_dmat, data->map, 3568 BUS_DMASYNC_POSTWRITE); 3569 bus_dmamap_unload(ring->data_dmat, data->map); 3570 m_freem(data->m); 3571 data->m = NULL; 3572 } 3573 wakeup(&ring->desc[pkt->hdr.idx]); 3574 3575 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) { 3576 device_printf(sc->sc_dev, 3577 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n", 3578 __func__, pkt->hdr.idx, ring->queued, ring->cur); 3579 /* XXX call iwm_force_nmi() */ 3580 } 3581 3582 KASSERT(ring->queued > 0, ("ring->queued is empty?")); 3583 ring->queued--; 3584 if (ring->queued == 0) 3585 iwm_pcie_clear_cmd_in_flight(sc); 3586 } 3587 3588 #if 0 3589 /* 3590 * necessary only for block ack mode 3591 */ 3592 void 3593 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id, 3594 uint16_t len) 3595 { 3596 struct iwm_agn_scd_bc_tbl *scd_bc_tbl; 3597 uint16_t w_val; 3598 3599 scd_bc_tbl = sc->sched_dma.vaddr; 3600 3601 len += 8; /* magic numbers came naturally from paris */ 3602 len = roundup(len, 4) / 4; 3603 3604 w_val = htole16(sta_id << 12 | len); 3605 3606 /* Update TX scheduler. */ 3607 scd_bc_tbl[qid].tfd_offset[idx] = w_val; 3608 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3609 BUS_DMASYNC_PREWRITE); 3610 3611 /* I really wonder what this is ?!? */ 3612 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) { 3613 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val; 3614 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3615 BUS_DMASYNC_PREWRITE); 3616 } 3617 } 3618 #endif 3619 3620 static int 3621 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate) 3622 { 3623 int i; 3624 3625 for (i = 0; i < nitems(iwm_rates); i++) { 3626 if (iwm_rates[i].rate == rate) 3627 return (i); 3628 } 3629 /* XXX error? */ 3630 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3631 "%s: couldn't find an entry for rate=%d\n", 3632 __func__, 3633 rate); 3634 return (0); 3635 } 3636 3637 /* 3638 * Fill in the rate related information for a transmit command. 
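 * Management, control, EAPOL and multicast frames use the fixed
 * rates from net80211's tx parameters; other data frames are left
 * to the firmware's rate-selection (RS) table and only carry an
 * initial rate index plus IWM_TX_CMD_FLG_STA_RATE.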
3639 */ 3640 static const struct iwm_rate * 3641 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in, 3642 struct mbuf *m, struct iwm_tx_cmd *tx) 3643 { 3644 struct ieee80211_node *ni = &in->in_ni; 3645 struct ieee80211_frame *wh; 3646 const struct ieee80211_txparam *tp = ni->ni_txparms; 3647 const struct iwm_rate *rinfo; 3648 int type; 3649 int ridx, rate_flags; 3650 3651 wh = mtod(m, struct ieee80211_frame *); 3652 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3653 3654 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT; 3655 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY; 3656 3657 if (type == IEEE80211_FC0_TYPE_MGT || 3658 type == IEEE80211_FC0_TYPE_CTL || 3659 (m->m_flags & M_EAPOL) != 0) { 3660 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate); 3661 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3662 "%s: MGT (%d)\n", __func__, tp->mgmtrate); 3663 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3664 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate); 3665 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3666 "%s: MCAST (%d)\n", __func__, tp->mcastrate); 3667 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 3668 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate); 3669 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3670 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate); 3671 } else { 3672 /* for data frames, use RS table */ 3673 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__); 3674 ridx = iwm_rate2ridx(sc, ni->ni_txrate); 3675 if (ridx == -1) 3676 ridx = 0; 3677 3678 /* This is the index into the programmed table */ 3679 tx->initial_rate_index = 0; 3680 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE); 3681 } 3682 3683 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3684 "%s: frame type=%d txrate %d\n", 3685 __func__, type, iwm_rates[ridx].rate); 3686 3687 rinfo = &iwm_rates[ridx]; 3688 3689 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n", 3690 __func__, ridx, 3691 rinfo->rate, 3692 !! (IWM_RIDX_IS_CCK(ridx)) 3693 ); 3694 3695 /* XXX TODO: hard-coded TX antenna? 
*/ 3696 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000) 3697 rate_flags = IWM_RATE_MCS_ANT_B_MSK; 3698 else 3699 rate_flags = IWM_RATE_MCS_ANT_A_MSK; 3700 if (IWM_RIDX_IS_CCK(ridx)) 3701 rate_flags |= IWM_RATE_MCS_CCK_MSK; 3702 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp); 3703 3704 return rinfo; 3705 } 3706 3707 #define TB0_SIZE 16 3708 static int 3709 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 3710 { 3711 struct ieee80211com *ic = &sc->sc_ic; 3712 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3713 struct iwm_node *in = IWM_NODE(ni); 3714 struct iwm_tx_ring *ring; 3715 struct iwm_tx_data *data; 3716 struct iwm_tfd *desc; 3717 struct iwm_device_cmd *cmd; 3718 struct iwm_tx_cmd *tx; 3719 struct ieee80211_frame *wh; 3720 struct ieee80211_key *k = NULL; 3721 struct mbuf *m1; 3722 const struct iwm_rate *rinfo; 3723 uint32_t flags; 3724 u_int hdrlen; 3725 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 3726 int nsegs; 3727 uint8_t tid, type; 3728 int i, totlen, error, pad; 3729 3730 wh = mtod(m, struct ieee80211_frame *); 3731 hdrlen = ieee80211_anyhdrsize(wh); 3732 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3733 tid = 0; 3734 ring = &sc->txq[ac]; 3735 desc = &ring->desc[ring->cur]; 3736 data = &ring->data[ring->cur]; 3737 3738 /* Fill out iwm_tx_cmd to send to the firmware */ 3739 cmd = &ring->cmd[ring->cur]; 3740 cmd->hdr.code = IWM_TX_CMD; 3741 cmd->hdr.flags = 0; 3742 cmd->hdr.qid = ring->qid; 3743 cmd->hdr.idx = ring->cur; 3744 3745 tx = (void *)cmd->data; 3746 memset(tx, 0, sizeof(*tx)); 3747 3748 rinfo = iwm_tx_fill_cmd(sc, in, m, tx); 3749 3750 /* Encrypt the frame if need be. */ 3751 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 3752 /* Retrieve key for TX && do software encryption. */ 3753 k = ieee80211_crypto_encap(ni, m); 3754 if (k == NULL) { 3755 m_freem(m); 3756 return (ENOBUFS); 3757 } 3758 /* 802.11 header may have moved. */ 3759 wh = mtod(m, struct ieee80211_frame *); 3760 } 3761 3762 if (ieee80211_radiotap_active_vap(vap)) { 3763 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap; 3764 3765 tap->wt_flags = 0; 3766 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 3767 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 3768 tap->wt_rate = rinfo->rate; 3769 if (k != NULL) 3770 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3771 ieee80211_radiotap_tx(vap, m); 3772 } 3773 3774 flags = 0; 3775 totlen = m->m_pkthdr.len; 3776 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3777 flags |= IWM_TX_CMD_FLG_ACK; 3778 } 3779 3780 if (type == IEEE80211_FC0_TYPE_DATA && 3781 totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold && 3782 !IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3783 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE; 3784 } 3785 3786 tx->sta_id = IWM_STATION_ID; 3787 3788 if (type == IEEE80211_FC0_TYPE_MGT) { 3789 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3790 3791 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3792 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { 3793 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC); 3794 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) { 3795 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3796 } else { 3797 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT); 3798 } 3799 } else { 3800 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3801 } 3802 3803 if (hdrlen & 3) { 3804 /* First segment length must be a multiple of 4. 
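		 * Pad bytes are inserted after the 802.11 header so that
		 * the payload stays 4-byte aligned; IWM_TX_CMD_FLG_MH_PAD
		 * and IWM_TX_CMD_OFFLD_PAD mark that padding for the
		 * firmware and hardware.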
		 */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
		pad = 4 - (hdrlen & 3);
	} else {
		tx->offload_assist = 0;
		pad = 0;
	}

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	memset(desc, 0, sizeof(*desc));
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
	    (TB0_SIZE << 4));
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
	    hdrlen + pad - TB0_SIZE) << 4));

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
		/*
		 * Keep the length inside the htole16() conversion, matching
		 * tbs[0] and tbs[1] above; or-ing it in afterwards would
		 * produce a wrong hi_n_len on big-endian machines.
		 */
		desc->tbs[i + 2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
		    (seg->ds_len << 4));
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold.
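	 * iwm_start() stops dequeueing frames while any bit in qfullmsk is
	 * set; the bit is presumably cleared again from the TX completion
	 * path once ring->queued drops back below the mark.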
	 */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}

static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/*
	 * XXX the bpf params (rate, flags) are currently ignored; frames
	 * are transmitted the same way whether params were given or not.
	 */
	error = iwm_tx(sc, m, ni, 0);
	if (sc->sc_tx_timer == 0)
		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}

/*
 * mvm/tx.c
 */

/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int
iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
{
	int ret;
	struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};

	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}

/*
 * BEGIN mvm/quota.c
 */

static int
iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
{
	struct iwm_time_quota_cmd_v1 cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (ivp) {
		id = ivp->phy_ctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = ivp->phy_ctxt->color;
		n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MAX_QUOTA fragments.
Divide these fragments 4009 * equally between all the bindings that require quota 4010 */ 4011 num_active_macs = 0; 4012 for (i = 0; i < IWM_MAX_BINDINGS; i++) { 4013 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID); 4014 num_active_macs += n_ifs[i]; 4015 } 4016 4017 quota = 0; 4018 quota_rem = 0; 4019 if (num_active_macs) { 4020 quota = IWM_MAX_QUOTA / num_active_macs; 4021 quota_rem = IWM_MAX_QUOTA % num_active_macs; 4022 } 4023 4024 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) { 4025 if (colors[i] < 0) 4026 continue; 4027 4028 cmd.quotas[idx].id_and_color = 4029 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i])); 4030 4031 if (n_ifs[i] <= 0) { 4032 cmd.quotas[idx].quota = htole32(0); 4033 cmd.quotas[idx].max_duration = htole32(0); 4034 } else { 4035 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]); 4036 cmd.quotas[idx].max_duration = htole32(0); 4037 } 4038 idx++; 4039 } 4040 4041 /* Give the remainder of the session to the first binding */ 4042 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem); 4043 4044 ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC, 4045 sizeof(cmd), &cmd); 4046 if (ret) 4047 device_printf(sc->sc_dev, 4048 "%s: Failed to send quota: %d\n", __func__, ret); 4049 return ret; 4050 } 4051 4052 /* 4053 * END mvm/quota.c 4054 */ 4055 4056 /* 4057 * ieee80211 routines 4058 */ 4059 4060 /* 4061 * Change to AUTH state in 80211 state machine. Roughly matches what 4062 * Linux does in bss_info_changed(). 4063 */ 4064 static int 4065 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc) 4066 { 4067 struct ieee80211_node *ni; 4068 struct iwm_node *in; 4069 struct iwm_vap *iv = IWM_VAP(vap); 4070 uint32_t duration; 4071 int error; 4072 4073 /* 4074 * XXX i have a feeling that the vap node is being 4075 * freed from underneath us. Grr. 4076 */ 4077 ni = ieee80211_ref_node(vap->iv_bss); 4078 in = IWM_NODE(ni); 4079 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE, 4080 "%s: called; vap=%p, bss ni=%p\n", 4081 __func__, 4082 vap, 4083 ni); 4084 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n", 4085 __func__, ether_sprintf(ni->ni_bssid)); 4086 4087 in->in_assoc = 0; 4088 iv->iv_auth = 1; 4089 4090 /* 4091 * Firmware bug - it'll crash if the beacon interval is less 4092 * than 16. We can't avoid connecting at all, so refuse the 4093 * station state change, this will cause net80211 to abandon 4094 * attempts to connect to this AP, and eventually wpa_s will 4095 * blacklist the AP... 4096 */ 4097 if (ni->ni_intval < 16) { 4098 device_printf(sc->sc_dev, 4099 "AP %s beacon interval is %d, refusing due to firmware bug!\n", 4100 ether_sprintf(ni->ni_bssid), ni->ni_intval); 4101 error = EINVAL; 4102 goto out; 4103 } 4104 4105 error = iwm_allow_mcast(vap, sc); 4106 if (error) { 4107 device_printf(sc->sc_dev, 4108 "%s: failed to set multicast\n", __func__); 4109 goto out; 4110 } 4111 4112 /* 4113 * This is where it deviates from what Linux does. 4114 * 4115 * Linux iwlwifi doesn't reset the nic each time, nor does it 4116 * call ctxt_add() here. Instead, it adds it during vap creation, 4117 * and always does a mac_ctx_changed(). 4118 * 4119 * The openbsd port doesn't attempt to do that - it reset things 4120 * at odd states and does the add here. 4121 * 4122 * So, until the state handling is fixed (ie, we never reset 4123 * the NIC except for a firmware failure, which should drag 4124 * the NIC back to IDLE, re-setup and re-add all the mac/phy 4125 * contexts that are required), let's do a dirty hack here. 
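	 *
	 * is_uploaded below tracks whether this vap's MAC context has
	 * already been pushed to the firmware; it selects between a
	 * "changed" update and the initial add.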
4126 */ 4127 if (iv->is_uploaded) { 4128 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) { 4129 device_printf(sc->sc_dev, 4130 "%s: failed to update MAC\n", __func__); 4131 goto out; 4132 } 4133 } else { 4134 if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) { 4135 device_printf(sc->sc_dev, 4136 "%s: failed to add MAC\n", __func__); 4137 goto out; 4138 } 4139 } 4140 sc->sc_firmware_state = 1; 4141 4142 if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 4143 in->in_ni.ni_chan, 1, 1)) != 0) { 4144 device_printf(sc->sc_dev, 4145 "%s: failed update phy ctxt\n", __func__); 4146 goto out; 4147 } 4148 iv->phy_ctxt = &sc->sc_phyctxt[0]; 4149 4150 if ((error = iwm_binding_add_vif(sc, iv)) != 0) { 4151 device_printf(sc->sc_dev, 4152 "%s: binding update cmd\n", __func__); 4153 goto out; 4154 } 4155 sc->sc_firmware_state = 2; 4156 /* 4157 * Authentication becomes unreliable when powersaving is left enabled 4158 * here. Powersaving will be activated again when association has 4159 * finished or is aborted. 4160 */ 4161 iv->ps_disabled = TRUE; 4162 error = iwm_power_update_mac(sc); 4163 iv->ps_disabled = FALSE; 4164 if (error != 0) { 4165 device_printf(sc->sc_dev, 4166 "%s: failed to update power management\n", 4167 __func__); 4168 goto out; 4169 } 4170 if ((error = iwm_add_sta(sc, in)) != 0) { 4171 device_printf(sc->sc_dev, 4172 "%s: failed to add sta\n", __func__); 4173 goto out; 4174 } 4175 sc->sc_firmware_state = 3; 4176 4177 /* 4178 * Prevent the FW from wandering off channel during association 4179 * by "protecting" the session with a time event. 4180 */ 4181 /* XXX duration is in units of TU, not MS */ 4182 duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS; 4183 iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE); 4184 4185 error = 0; 4186 out: 4187 if (error != 0) 4188 iv->iv_auth = 0; 4189 ieee80211_free_node(ni); 4190 return (error); 4191 } 4192 4193 static struct ieee80211_node * 4194 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 4195 { 4196 return malloc(sizeof (struct iwm_node), M_80211_NODE, 4197 M_NOWAIT | M_ZERO); 4198 } 4199 4200 static uint8_t 4201 iwm_rate_from_ucode_rate(uint32_t rate_n_flags) 4202 { 4203 uint8_t plcp = rate_n_flags & 0xff; 4204 int i; 4205 4206 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4207 if (iwm_rates[i].plcp == plcp) 4208 return iwm_rates[i].rate; 4209 } 4210 return 0; 4211 } 4212 4213 uint8_t 4214 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx) 4215 { 4216 int i; 4217 uint8_t rval; 4218 4219 for (i = 0; i < rs->rs_nrates; i++) { 4220 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 4221 if (rval == iwm_rates[ridx].rate) 4222 return rs->rs_rates[i]; 4223 } 4224 4225 return 0; 4226 } 4227 4228 static int 4229 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate) 4230 { 4231 int i; 4232 4233 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4234 if (iwm_rates[i].rate == rate) 4235 return i; 4236 } 4237 4238 device_printf(sc->sc_dev, 4239 "%s: WARNING: device rate for %u not found!\n", 4240 __func__, rate); 4241 4242 return -1; 4243 } 4244 4245 4246 static void 4247 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix) 4248 { 4249 struct ieee80211_node *ni = &in->in_ni; 4250 struct iwm_lq_cmd *lq = &in->in_lq; 4251 struct ieee80211_rateset *rs = &ni->ni_rates; 4252 int nrates = rs->rs_nrates; 4253 int i, ridx, tab = 0; 4254 // int txant = 0; 4255 4256 KASSERT(rix >= 0 && rix < nrates, ("invalid rix")); 4257 4258 if (nrates > nitems(lq->rs_table)) { 4259 device_printf(sc->sc_dev, 4260 "%s: node supports %d rates, 
driver handles " 4261 "only %zu\n", __func__, nrates, nitems(lq->rs_table)); 4262 return; 4263 } 4264 if (nrates == 0) { 4265 device_printf(sc->sc_dev, 4266 "%s: node supports 0 rates, odd!\n", __func__); 4267 return; 4268 } 4269 nrates = imin(rix + 1, nrates); 4270 4271 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4272 "%s: nrates=%d\n", __func__, nrates); 4273 4274 /* then construct a lq_cmd based on those */ 4275 memset(lq, 0, sizeof(*lq)); 4276 lq->sta_id = IWM_STATION_ID; 4277 4278 /* For HT, always enable RTS/CTS to avoid excessive retries. */ 4279 if (ni->ni_flags & IEEE80211_NODE_HT) 4280 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK; 4281 4282 /* 4283 * are these used? (we don't do SISO or MIMO) 4284 * need to set them to non-zero, though, or we get an error. 4285 */ 4286 lq->single_stream_ant_msk = 1; 4287 lq->dual_stream_ant_msk = 1; 4288 4289 /* 4290 * Build the actual rate selection table. 4291 * The lowest bits are the rates. Additionally, 4292 * CCK needs bit 9 to be set. The rest of the bits 4293 * we add to the table select the tx antenna 4294 * Note that we add the rates in the highest rate first 4295 * (opposite of ni_rates). 4296 */ 4297 for (i = 0; i < nrates; i++) { 4298 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL; 4299 int nextant; 4300 4301 /* Map 802.11 rate to HW rate index. */ 4302 ridx = iwm_rate2ridx(sc, rate); 4303 if (ridx == -1) 4304 continue; 4305 4306 #if 0 4307 if (txant == 0) 4308 txant = iwm_get_valid_tx_ant(sc); 4309 nextant = 1<<(ffs(txant)-1); 4310 txant &= ~nextant; 4311 #else 4312 nextant = iwm_get_valid_tx_ant(sc); 4313 #endif 4314 tab = iwm_rates[ridx].plcp; 4315 tab |= nextant << IWM_RATE_MCS_ANT_POS; 4316 if (IWM_RIDX_IS_CCK(ridx)) 4317 tab |= IWM_RATE_MCS_CCK_MSK; 4318 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4319 "station rate i=%d, rate=%d, hw=%x\n", 4320 i, iwm_rates[ridx].rate, tab); 4321 lq->rs_table[i] = htole32(tab); 4322 } 4323 /* then fill the rest with the lowest possible rate */ 4324 for (i = nrates; i < nitems(lq->rs_table); i++) { 4325 KASSERT(tab != 0, ("invalid tab")); 4326 lq->rs_table[i] = htole32(tab); 4327 } 4328 } 4329 4330 static void 4331 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap) 4332 { 4333 struct iwm_vap *ivp = IWM_VAP(vap); 4334 int error; 4335 4336 /* Avoid Tx watchdog triggering, when transfers get dropped here. 
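	 * The repeated sc_firmware_state checks below unwind the state in
	 * the reverse order of iwm_auth(): 3 = station added, 2 = binding
	 * added, 1 = MAC context added, 0 = nothing uploaded.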
*/ 4337 sc->sc_tx_timer = 0; 4338 4339 ivp->iv_auth = 0; 4340 if (sc->sc_firmware_state == 3) { 4341 iwm_xmit_queue_drain(sc); 4342 // iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC); 4343 error = iwm_rm_sta(sc, vap, TRUE); 4344 if (error) { 4345 device_printf(sc->sc_dev, 4346 "%s: Failed to remove station: %d\n", 4347 __func__, error); 4348 } 4349 } 4350 if (sc->sc_firmware_state == 3) { 4351 error = iwm_mac_ctxt_changed(sc, vap); 4352 if (error) { 4353 device_printf(sc->sc_dev, 4354 "%s: Failed to change mac context: %d\n", 4355 __func__, error); 4356 } 4357 } 4358 if (sc->sc_firmware_state == 3) { 4359 error = iwm_sf_update(sc, vap, FALSE); 4360 if (error) { 4361 device_printf(sc->sc_dev, 4362 "%s: Failed to update smart FIFO: %d\n", 4363 __func__, error); 4364 } 4365 } 4366 if (sc->sc_firmware_state == 3) { 4367 error = iwm_rm_sta_id(sc, vap); 4368 if (error) { 4369 device_printf(sc->sc_dev, 4370 "%s: Failed to remove station id: %d\n", 4371 __func__, error); 4372 } 4373 } 4374 if (sc->sc_firmware_state == 3) { 4375 error = iwm_update_quotas(sc, NULL); 4376 if (error) { 4377 device_printf(sc->sc_dev, 4378 "%s: Failed to update PHY quota: %d\n", 4379 __func__, error); 4380 } 4381 } 4382 if (sc->sc_firmware_state == 3) { 4383 /* XXX Might need to specify bssid correctly. */ 4384 error = iwm_mac_ctxt_changed(sc, vap); 4385 if (error) { 4386 device_printf(sc->sc_dev, 4387 "%s: Failed to change mac context: %d\n", 4388 __func__, error); 4389 } 4390 } 4391 if (sc->sc_firmware_state == 3) { 4392 sc->sc_firmware_state = 2; 4393 } 4394 if (sc->sc_firmware_state > 1) { 4395 error = iwm_binding_remove_vif(sc, ivp); 4396 if (error) { 4397 device_printf(sc->sc_dev, 4398 "%s: Failed to remove channel ctx: %d\n", 4399 __func__, error); 4400 } 4401 } 4402 if (sc->sc_firmware_state > 1) { 4403 sc->sc_firmware_state = 1; 4404 } 4405 ivp->phy_ctxt = NULL; 4406 if (sc->sc_firmware_state > 0) { 4407 error = iwm_mac_ctxt_changed(sc, vap); 4408 if (error) { 4409 device_printf(sc->sc_dev, 4410 "%s: Failed to change mac context: %d\n", 4411 __func__, error); 4412 } 4413 } 4414 if (sc->sc_firmware_state > 0) { 4415 error = iwm_power_update_mac(sc); 4416 if (error != 0) { 4417 device_printf(sc->sc_dev, 4418 "%s: failed to update power management\n", 4419 __func__); 4420 } 4421 } 4422 sc->sc_firmware_state = 0; 4423 } 4424 4425 static int 4426 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4427 { 4428 struct iwm_vap *ivp = IWM_VAP(vap); 4429 struct ieee80211com *ic = vap->iv_ic; 4430 struct iwm_softc *sc = ic->ic_softc; 4431 struct iwm_node *in; 4432 int error; 4433 4434 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4435 "switching state %s -> %s arg=0x%x\n", 4436 ieee80211_state_name[vap->iv_state], 4437 ieee80211_state_name[nstate], 4438 arg); 4439 4440 IEEE80211_UNLOCK(ic); 4441 IWM_LOCK(sc); 4442 4443 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) && 4444 (nstate == IEEE80211_S_AUTH || 4445 nstate == IEEE80211_S_ASSOC || 4446 nstate == IEEE80211_S_RUN)) { 4447 /* Stop blinking for a scan, when authenticating. 
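		 * The blink callout is presumably armed by the scan path;
		 * the LED is driven solid again via iwm_led_enable() once
		 * we reach RUN.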
*/ 4448 iwm_led_blink_stop(sc); 4449 } 4450 4451 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 4452 iwm_led_disable(sc); 4453 /* disable beacon filtering if we're hopping out of RUN */ 4454 iwm_disable_beacon_filter(sc); 4455 if (((in = IWM_NODE(vap->iv_bss)) != NULL)) 4456 in->in_assoc = 0; 4457 } 4458 4459 if ((vap->iv_state == IEEE80211_S_AUTH || 4460 vap->iv_state == IEEE80211_S_ASSOC || 4461 vap->iv_state == IEEE80211_S_RUN) && 4462 (nstate == IEEE80211_S_INIT || 4463 nstate == IEEE80211_S_SCAN || 4464 nstate == IEEE80211_S_AUTH)) { 4465 iwm_stop_session_protection(sc, ivp); 4466 } 4467 4468 if ((vap->iv_state == IEEE80211_S_RUN || 4469 vap->iv_state == IEEE80211_S_ASSOC) && 4470 nstate == IEEE80211_S_INIT) { 4471 /* 4472 * In this case, iv_newstate() wants to send an 80211 frame on 4473 * the network that we are leaving. So we need to call it, 4474 * before tearing down all the firmware state. 4475 */ 4476 IWM_UNLOCK(sc); 4477 IEEE80211_LOCK(ic); 4478 ivp->iv_newstate(vap, nstate, arg); 4479 IEEE80211_UNLOCK(ic); 4480 IWM_LOCK(sc); 4481 iwm_bring_down_firmware(sc, vap); 4482 IWM_UNLOCK(sc); 4483 IEEE80211_LOCK(ic); 4484 return 0; 4485 } 4486 4487 switch (nstate) { 4488 case IEEE80211_S_INIT: 4489 case IEEE80211_S_SCAN: 4490 break; 4491 4492 case IEEE80211_S_AUTH: 4493 iwm_bring_down_firmware(sc, vap); 4494 if ((error = iwm_auth(vap, sc)) != 0) { 4495 device_printf(sc->sc_dev, 4496 "%s: could not move to auth state: %d\n", 4497 __func__, error); 4498 iwm_bring_down_firmware(sc, vap); 4499 IWM_UNLOCK(sc); 4500 IEEE80211_LOCK(ic); 4501 return 1; 4502 } 4503 break; 4504 4505 case IEEE80211_S_ASSOC: 4506 /* 4507 * EBS may be disabled due to previous failures reported by FW. 4508 * Reset EBS status here assuming environment has been changed. 
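		 * (last_ebs_successful starts out TRUE in iwm_attach() and,
		 * presumably, is cleared elsewhere when the firmware reports
		 * an EBS failure during a scan.)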
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
		in = IWM_NODE(vap->iv_bss);
		/*
		 * Update the association state now that we have it all
		 * (e.g. the associd arrives at this point).
		 */
		error = iwm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		iwm_sf_update(sc, vap, FALSE);
		iwm_enable_beacon_filter(sc, ivp);
		iwm_power_update_mac(sc);
		iwm_update_quotas(sc, ivp);
		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
		iwm_setrates(sc, in, rix);

		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}

void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}

static int
iwm_send_bt_init_conf(struct iwm_softc *sc)
{
	struct iwm_bt_coex_cmd bt_cmd;

	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);

	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
	    &bt_cmd);
}

static boolean_t
iwm_is_lar_supported(struct iwm_softc *sc)
{
	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);

	if (iwm_lar_disable)
		return FALSE;

	/*
	 * Enable LAR only if it is supported by the FW (TLV) and
	 * enabled in the NVM.
	 */
	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
		return nvm_lar && tlv_lar;
	else
		return tlv_lar;
}

static boolean_t
iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
{
	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
}

static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp_v2 *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

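	/*
	 * The LAR-support-v2 capability implies the longer command layout;
	 * older firmware only accepts the v1 size.
	 */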
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels = le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels = le32toh(mcc_resp_v1->n_channels);
	}

	/* Workaround for a FW/NVM issue - it returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;	/* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}

static void
iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
		.len = { sizeof(uint32_t), },
		.data = { &backoff, },
	};

	if (iwm_send_cmd(sc, &cmd) != 0) {
		device_printf(sc->sc_dev,
		    "failed to change thermal tx backoff\n");
	}
}

static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	sc->sf_state = IWM_SF_UNINIT;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * We should stop and restart the HW, since the INIT
	 * image has just been loaded.
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* Restart, this time with the regular firmware */
	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	error = iwm_sf_update(sc, NULL, FALSE);
	if (error)
		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
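		 * (iwm_auth() later points sc_phyctxt[0] at the real BSS
		 * channel via iwm_phy_ctxt_changed().)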
4765 */ 4766 if ((error = iwm_phy_ctxt_add(sc, 4767 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0) 4768 goto error; 4769 } 4770 4771 /* Initialize tx backoffs to the minimum. */ 4772 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 4773 iwm_tt_tx_backoff(sc, 0); 4774 4775 if (iwm_config_ltr(sc) != 0) 4776 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n"); 4777 4778 error = iwm_power_update_device(sc); 4779 if (error) 4780 goto error; 4781 4782 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0) 4783 goto error; 4784 4785 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) { 4786 if ((error = iwm_config_umac_scan(sc)) != 0) 4787 goto error; 4788 } 4789 4790 /* Enable Tx queues. */ 4791 for (ac = 0; ac < WME_NUM_AC; ac++) { 4792 error = iwm_enable_txq(sc, IWM_STATION_ID, ac, 4793 iwm_ac_to_tx_fifo[ac]); 4794 if (error) 4795 goto error; 4796 } 4797 4798 if ((error = iwm_disable_beacon_filter(sc)) != 0) { 4799 device_printf(sc->sc_dev, "failed to disable beacon filter\n"); 4800 goto error; 4801 } 4802 4803 return 0; 4804 4805 error: 4806 iwm_stop_device(sc); 4807 return error; 4808 } 4809 4810 /* Allow multicast from our BSSID. */ 4811 static int 4812 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc) 4813 { 4814 struct ieee80211_node *ni = vap->iv_bss; 4815 struct iwm_mcast_filter_cmd *cmd; 4816 size_t size; 4817 int error; 4818 4819 size = roundup(sizeof(*cmd), 4); 4820 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 4821 if (cmd == NULL) 4822 return ENOMEM; 4823 cmd->filter_own = 1; 4824 cmd->port_id = 0; 4825 cmd->count = 0; 4826 cmd->pass_all = 1; 4827 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid); 4828 4829 error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 4830 IWM_CMD_SYNC, size, cmd); 4831 free(cmd, M_DEVBUF); 4832 4833 return (error); 4834 } 4835 4836 /* 4837 * ifnet interfaces 4838 */ 4839 4840 static void 4841 iwm_init(struct iwm_softc *sc) 4842 { 4843 int error; 4844 4845 if (sc->sc_flags & IWM_FLAG_HW_INITED) { 4846 return; 4847 } 4848 sc->sc_generation++; 4849 sc->sc_flags &= ~IWM_FLAG_STOPPED; 4850 4851 if ((error = iwm_init_hw(sc)) != 0) { 4852 printf("iwm_init_hw failed %d\n", error); 4853 iwm_stop(sc); 4854 return; 4855 } 4856 4857 /* 4858 * Ok, firmware loaded and we are jogging 4859 */ 4860 sc->sc_flags |= IWM_FLAG_HW_INITED; 4861 } 4862 4863 static int 4864 iwm_transmit(struct ieee80211com *ic, struct mbuf *m) 4865 { 4866 struct iwm_softc *sc; 4867 int error; 4868 4869 sc = ic->ic_softc; 4870 4871 IWM_LOCK(sc); 4872 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 4873 IWM_UNLOCK(sc); 4874 return (ENXIO); 4875 } 4876 error = mbufq_enqueue(&sc->sc_snd, m); 4877 if (error) { 4878 IWM_UNLOCK(sc); 4879 return (error); 4880 } 4881 iwm_start(sc); 4882 IWM_UNLOCK(sc); 4883 return (0); 4884 } 4885 4886 /* 4887 * Dequeue packets from sendq and call send. 
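 * Runs until either the send queue drains or one of the TX rings fills
 * up (qfullmsk); the watchdog is armed while frames remain in flight.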
4888 */ 4889 static void 4890 iwm_start(struct iwm_softc *sc) 4891 { 4892 struct ieee80211_node *ni; 4893 struct mbuf *m; 4894 int ac = 0; 4895 4896 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__); 4897 while (sc->qfullmsk == 0 && 4898 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 4899 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4900 if (iwm_tx(sc, m, ni, ac) != 0) { 4901 if_inc_counter(ni->ni_vap->iv_ifp, 4902 IFCOUNTER_OERRORS, 1); 4903 ieee80211_free_node(ni); 4904 continue; 4905 } 4906 if (sc->sc_tx_timer == 0) { 4907 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, 4908 sc); 4909 } 4910 sc->sc_tx_timer = 15; 4911 } 4912 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__); 4913 } 4914 4915 static void 4916 iwm_stop(struct iwm_softc *sc) 4917 { 4918 4919 sc->sc_flags &= ~IWM_FLAG_HW_INITED; 4920 sc->sc_flags |= IWM_FLAG_STOPPED; 4921 sc->sc_generation++; 4922 iwm_led_blink_stop(sc); 4923 sc->sc_tx_timer = 0; 4924 iwm_stop_device(sc); 4925 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 4926 } 4927 4928 static void 4929 iwm_watchdog(void *arg) 4930 { 4931 struct iwm_softc *sc = arg; 4932 struct ieee80211com *ic = &sc->sc_ic; 4933 4934 if (sc->sc_attached == 0) 4935 return; 4936 4937 if (sc->sc_tx_timer > 0) { 4938 if (--sc->sc_tx_timer == 0) { 4939 device_printf(sc->sc_dev, "device timeout\n"); 4940 #ifdef IWM_DEBUG 4941 iwm_nic_error(sc); 4942 #endif 4943 ieee80211_restart_all(ic); 4944 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 4945 return; 4946 } 4947 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 4948 } 4949 } 4950 4951 static void 4952 iwm_parent(struct ieee80211com *ic) 4953 { 4954 struct iwm_softc *sc = ic->ic_softc; 4955 int startall = 0; 4956 int rfkill = 0; 4957 4958 IWM_LOCK(sc); 4959 if (ic->ic_nrunning > 0) { 4960 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) { 4961 iwm_init(sc); 4962 rfkill = iwm_check_rfkill(sc); 4963 if (!rfkill) 4964 startall = 1; 4965 } 4966 } else if (sc->sc_flags & IWM_FLAG_HW_INITED) 4967 iwm_stop(sc); 4968 IWM_UNLOCK(sc); 4969 if (startall) 4970 ieee80211_start_all(ic); 4971 else if (rfkill) 4972 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); 4973 } 4974 4975 static void 4976 iwm_rftoggle_task(void *arg, int npending __unused) 4977 { 4978 struct iwm_softc *sc = arg; 4979 struct ieee80211com *ic = &sc->sc_ic; 4980 int rfkill; 4981 4982 IWM_LOCK(sc); 4983 rfkill = iwm_check_rfkill(sc); 4984 IWM_UNLOCK(sc); 4985 if (rfkill) { 4986 device_printf(sc->sc_dev, 4987 "%s: rfkill switch, disabling interface\n", __func__); 4988 ieee80211_suspend_all(ic); 4989 ieee80211_notify_radio(ic, 0); 4990 } else { 4991 device_printf(sc->sc_dev, 4992 "%s: rfkill cleared, re-enabling interface\n", __func__); 4993 ieee80211_resume_all(ic); 4994 ieee80211_notify_radio(ic, 1); 4995 } 4996 } 4997 4998 /* 4999 * The interrupt side of things 5000 */ 5001 5002 /* 5003 * error dumping routines are from iwlwifi/mvm/utils.c 5004 */ 5005 5006 /* 5007 * Note: This structure is read from the device with IO accesses, 5008 * and the reading already does the endian conversion. As it is 5009 * read with uint32_t-sized accesses, any members with a different size 5010 * need to be ordered correctly though! 
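 * (iwm_nic_error() below pulls this table in with iwm_read_mem() as
 * sizeof(table)/sizeof(uint32_t) dwords.)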
5011 */ 5012 struct iwm_error_event_table { 5013 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 5014 uint32_t error_id; /* type of error */ 5015 uint32_t trm_hw_status0; /* TRM HW status */ 5016 uint32_t trm_hw_status1; /* TRM HW status */ 5017 uint32_t blink2; /* branch link */ 5018 uint32_t ilink1; /* interrupt link */ 5019 uint32_t ilink2; /* interrupt link */ 5020 uint32_t data1; /* error-specific data */ 5021 uint32_t data2; /* error-specific data */ 5022 uint32_t data3; /* error-specific data */ 5023 uint32_t bcon_time; /* beacon timer */ 5024 uint32_t tsf_low; /* network timestamp function timer */ 5025 uint32_t tsf_hi; /* network timestamp function timer */ 5026 uint32_t gp1; /* GP1 timer register */ 5027 uint32_t gp2; /* GP2 timer register */ 5028 uint32_t fw_rev_type; /* firmware revision type */ 5029 uint32_t major; /* uCode version major */ 5030 uint32_t minor; /* uCode version minor */ 5031 uint32_t hw_ver; /* HW Silicon version */ 5032 uint32_t brd_ver; /* HW board version */ 5033 uint32_t log_pc; /* log program counter */ 5034 uint32_t frame_ptr; /* frame pointer */ 5035 uint32_t stack_ptr; /* stack pointer */ 5036 uint32_t hcmd; /* last host command header */ 5037 uint32_t isr0; /* isr status register LMPM_NIC_ISR0: 5038 * rxtx_flag */ 5039 uint32_t isr1; /* isr status register LMPM_NIC_ISR1: 5040 * host_flag */ 5041 uint32_t isr2; /* isr status register LMPM_NIC_ISR2: 5042 * enc_flag */ 5043 uint32_t isr3; /* isr status register LMPM_NIC_ISR3: 5044 * time_flag */ 5045 uint32_t isr4; /* isr status register LMPM_NIC_ISR4: 5046 * wico interrupt */ 5047 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */ 5048 uint32_t wait_event; /* wait event() caller address */ 5049 uint32_t l2p_control; /* L2pControlField */ 5050 uint32_t l2p_duration; /* L2pDurationField */ 5051 uint32_t l2p_mhvalid; /* L2pMhValidBits */ 5052 uint32_t l2p_addr_match; /* L2pAddrMatchStat */ 5053 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on 5054 * (LMPM_PMG_SEL) */ 5055 uint32_t u_timestamp; /* indicate when the date and time of the 5056 * compilation */ 5057 uint32_t flow_handler; /* FH read/write pointers, RX credit */ 5058 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 5059 5060 /* 5061 * UMAC error struct - relevant starting from family 8000 chip. 5062 * Note: This structure is read from the device with IO accesses, 5063 * and the reading already does the endian conversion. As it is 5064 * read with u32-sized accesses, any members with a different size 5065 * need to be ordered correctly though! 
5066 */ 5067 struct iwm_umac_error_event_table { 5068 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 5069 uint32_t error_id; /* type of error */ 5070 uint32_t blink1; /* branch link */ 5071 uint32_t blink2; /* branch link */ 5072 uint32_t ilink1; /* interrupt link */ 5073 uint32_t ilink2; /* interrupt link */ 5074 uint32_t data1; /* error-specific data */ 5075 uint32_t data2; /* error-specific data */ 5076 uint32_t data3; /* error-specific data */ 5077 uint32_t umac_major; 5078 uint32_t umac_minor; 5079 uint32_t frame_pointer; /* core register 27*/ 5080 uint32_t stack_pointer; /* core register 28 */ 5081 uint32_t cmd_header; /* latest host cmd sent to UMAC */ 5082 uint32_t nic_isr_pref; /* ISR status register */ 5083 } __packed; 5084 5085 #define ERROR_START_OFFSET (1 * sizeof(uint32_t)) 5086 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t)) 5087 5088 #ifdef IWM_DEBUG 5089 struct { 5090 const char *name; 5091 uint8_t num; 5092 } advanced_lookup[] = { 5093 { "NMI_INTERRUPT_WDG", 0x34 }, 5094 { "SYSASSERT", 0x35 }, 5095 { "UCODE_VERSION_MISMATCH", 0x37 }, 5096 { "BAD_COMMAND", 0x38 }, 5097 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 5098 { "FATAL_ERROR", 0x3D }, 5099 { "NMI_TRM_HW_ERR", 0x46 }, 5100 { "NMI_INTERRUPT_TRM", 0x4C }, 5101 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 5102 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 5103 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 5104 { "NMI_INTERRUPT_HOST", 0x66 }, 5105 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 5106 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 5107 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 5108 { "ADVANCED_SYSASSERT", 0 }, 5109 }; 5110 5111 static const char * 5112 iwm_desc_lookup(uint32_t num) 5113 { 5114 int i; 5115 5116 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 5117 if (advanced_lookup[i].num == num) 5118 return advanced_lookup[i].name; 5119 5120 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 5121 return advanced_lookup[i].name; 5122 } 5123 5124 static void 5125 iwm_nic_umac_error(struct iwm_softc *sc) 5126 { 5127 struct iwm_umac_error_event_table table; 5128 uint32_t base; 5129 5130 base = sc->umac_error_event_table; 5131 5132 if (base < 0x800000) { 5133 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n", 5134 base); 5135 return; 5136 } 5137 5138 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5139 device_printf(sc->sc_dev, "reading errlog failed\n"); 5140 return; 5141 } 5142 5143 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5144 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n"); 5145 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5146 sc->sc_flags, table.valid); 5147 } 5148 5149 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id, 5150 iwm_desc_lookup(table.error_id)); 5151 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1); 5152 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2); 5153 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n", 5154 table.ilink1); 5155 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n", 5156 table.ilink2); 5157 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1); 5158 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2); 5159 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3); 5160 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major); 5161 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor); 5162 device_printf(sc->sc_dev, "0x%08X | frame pointer\n", 5163 table.frame_pointer); 5164 
device_printf(sc->sc_dev, "0x%08X | stack pointer\n", 5165 table.stack_pointer); 5166 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header); 5167 device_printf(sc->sc_dev, "0x%08X | isr status reg\n", 5168 table.nic_isr_pref); 5169 } 5170 5171 /* 5172 * Support for dumping the error log seemed like a good idea ... 5173 * but it's mostly hex junk and the only sensible thing is the 5174 * hw/ucode revision (which we know anyway). Since it's here, 5175 * I'll just leave it in, just in case e.g. the Intel guys want to 5176 * help us decipher some "ADVANCED_SYSASSERT" later. 5177 */ 5178 static void 5179 iwm_nic_error(struct iwm_softc *sc) 5180 { 5181 struct iwm_error_event_table table; 5182 uint32_t base; 5183 5184 device_printf(sc->sc_dev, "dumping device error log\n"); 5185 base = sc->error_event_table[0]; 5186 if (base < 0x800000) { 5187 device_printf(sc->sc_dev, 5188 "Invalid error log pointer 0x%08x\n", base); 5189 return; 5190 } 5191 5192 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5193 device_printf(sc->sc_dev, "reading errlog failed\n"); 5194 return; 5195 } 5196 5197 if (!table.valid) { 5198 device_printf(sc->sc_dev, "errlog not found, skipping\n"); 5199 return; 5200 } 5201 5202 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5203 device_printf(sc->sc_dev, "Start Error Log Dump:\n"); 5204 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5205 sc->sc_flags, table.valid); 5206 } 5207 5208 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id, 5209 iwm_desc_lookup(table.error_id)); 5210 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n", 5211 table.trm_hw_status0); 5212 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n", 5213 table.trm_hw_status1); 5214 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2); 5215 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1); 5216 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2); 5217 device_printf(sc->sc_dev, "%08X | data1\n", table.data1); 5218 device_printf(sc->sc_dev, "%08X | data2\n", table.data2); 5219 device_printf(sc->sc_dev, "%08X | data3\n", table.data3); 5220 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time); 5221 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low); 5222 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi); 5223 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1); 5224 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2); 5225 device_printf(sc->sc_dev, "%08X | uCode revision type\n", 5226 table.fw_rev_type); 5227 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major); 5228 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor); 5229 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver); 5230 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver); 5231 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd); 5232 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0); 5233 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1); 5234 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2); 5235 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3); 5236 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4); 5237 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id); 5238 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event); 5239 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control); 5240 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration); 5241 
device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid); 5242 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match); 5243 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 5244 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp); 5245 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler); 5246 5247 if (sc->umac_error_event_table) 5248 iwm_nic_umac_error(sc); 5249 } 5250 #endif 5251 5252 static void 5253 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m) 5254 { 5255 struct ieee80211com *ic = &sc->sc_ic; 5256 struct iwm_cmd_response *cresp; 5257 struct mbuf *m1; 5258 uint32_t offset = 0; 5259 uint32_t maxoff = IWM_RBUF_SIZE; 5260 uint32_t nextoff; 5261 boolean_t stolen = FALSE; 5262 5263 #define HAVEROOM(a) \ 5264 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff) 5265 5266 while (HAVEROOM(offset)) { 5267 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, 5268 offset); 5269 int qid, idx, code, len; 5270 5271 qid = pkt->hdr.qid; 5272 idx = pkt->hdr.idx; 5273 5274 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 5275 5276 /* 5277 * randomly get these from the firmware, no idea why. 5278 * they at least seem harmless, so just ignore them for now 5279 */ 5280 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) || 5281 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) { 5282 break; 5283 } 5284 5285 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5286 "rx packet qid=%d idx=%d type=%x\n", 5287 qid & ~0x80, pkt->hdr.idx, code); 5288 5289 len = iwm_rx_packet_len(pkt); 5290 len += sizeof(uint32_t); /* account for status word */ 5291 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN); 5292 5293 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt); 5294 5295 switch (code) { 5296 case IWM_REPLY_RX_PHY_CMD: 5297 iwm_rx_rx_phy_cmd(sc, pkt); 5298 break; 5299 5300 case IWM_REPLY_RX_MPDU_CMD: { 5301 /* 5302 * If this is the last frame in the RX buffer, we 5303 * can directly feed the mbuf to the sharks here. 5304 */ 5305 struct iwm_rx_packet *nextpkt = mtodoff(m, 5306 struct iwm_rx_packet *, nextoff); 5307 if (!HAVEROOM(nextoff) || 5308 (nextpkt->hdr.code == 0 && 5309 (nextpkt->hdr.qid & ~0x80) == 0 && 5310 nextpkt->hdr.idx == 0) || 5311 (nextpkt->len_n_flags == 5312 htole32(IWM_FH_RSCSR_FRAME_INVALID))) { 5313 if (iwm_rx_mpdu(sc, m, offset, stolen)) { 5314 stolen = FALSE; 5315 /* Make sure we abort the loop */ 5316 nextoff = maxoff; 5317 } 5318 break; 5319 } 5320 5321 /* 5322 * Use m_copym instead of m_split, because that 5323 * makes it easier to keep a valid rx buffer in 5324 * the ring, when iwm_rx_mpdu() fails. 5325 * 5326 * We need to start m_copym() at offset 0, to get the 5327 * M_PKTHDR flag preserved. 
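			 * iwm_rx_mpdu() returning non-zero means it consumed
			 * the chain it was handed; "stolen" records that so
			 * that the original mbuf gets freed once the whole
			 * RX buffer has been walked (see the end of this
			 * function).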
5328 */ 5329 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT); 5330 if (m1) { 5331 if (iwm_rx_mpdu(sc, m1, offset, stolen)) 5332 stolen = TRUE; 5333 else 5334 m_freem(m1); 5335 } 5336 break; 5337 } 5338 5339 case IWM_TX_CMD: 5340 iwm_rx_tx_cmd(sc, pkt); 5341 break; 5342 5343 case IWM_MISSED_BEACONS_NOTIFICATION: { 5344 struct iwm_missed_beacons_notif *resp; 5345 int missed; 5346 5347 /* XXX look at mac_id to determine interface ID */ 5348 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5349 5350 resp = (void *)pkt->data; 5351 missed = le32toh(resp->consec_missed_beacons); 5352 5353 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE, 5354 "%s: MISSED_BEACON: mac_id=%d, " 5355 "consec_since_last_rx=%d, consec=%d, num_expect=%d " 5356 "num_rx=%d\n", 5357 __func__, 5358 le32toh(resp->mac_id), 5359 le32toh(resp->consec_missed_beacons_since_last_rx), 5360 le32toh(resp->consec_missed_beacons), 5361 le32toh(resp->num_expected_beacons), 5362 le32toh(resp->num_recvd_beacons)); 5363 5364 /* Be paranoid */ 5365 if (vap == NULL) 5366 break; 5367 5368 /* XXX no net80211 locking? */ 5369 if (vap->iv_state == IEEE80211_S_RUN && 5370 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 5371 if (missed > vap->iv_bmissthreshold) { 5372 /* XXX bad locking; turn into task */ 5373 IWM_UNLOCK(sc); 5374 ieee80211_beacon_miss(ic); 5375 IWM_LOCK(sc); 5376 } 5377 } 5378 5379 break; 5380 } 5381 5382 case IWM_MFUART_LOAD_NOTIFICATION: 5383 break; 5384 5385 case IWM_ALIVE: 5386 break; 5387 5388 case IWM_CALIB_RES_NOTIF_PHY_DB: 5389 break; 5390 5391 case IWM_STATISTICS_NOTIFICATION: 5392 iwm_handle_rx_statistics(sc, pkt); 5393 break; 5394 5395 case IWM_NVM_ACCESS_CMD: 5396 case IWM_MCC_UPDATE_CMD: 5397 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5398 memcpy(sc->sc_cmd_resp, 5399 pkt, sizeof(sc->sc_cmd_resp)); 5400 } 5401 break; 5402 5403 case IWM_MCC_CHUB_UPDATE_CMD: { 5404 struct iwm_mcc_chub_notif *notif; 5405 notif = (void *)pkt->data; 5406 5407 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8; 5408 sc->sc_fw_mcc[1] = notif->mcc & 0xff; 5409 sc->sc_fw_mcc[2] = '\0'; 5410 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 5411 "fw source %d sent CC '%s'\n", 5412 notif->source_id, sc->sc_fw_mcc); 5413 break; 5414 } 5415 5416 case IWM_DTS_MEASUREMENT_NOTIFICATION: 5417 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP, 5418 IWM_DTS_MEASUREMENT_NOTIF_WIDE): { 5419 struct iwm_dts_measurement_notif_v1 *notif; 5420 5421 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) { 5422 device_printf(sc->sc_dev, 5423 "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); 5424 break; 5425 } 5426 notif = (void *)pkt->data; 5427 IWM_DPRINTF(sc, IWM_DEBUG_TEMP, 5428 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n", 5429 notif->temp); 5430 break; 5431 } 5432 5433 case IWM_PHY_CONFIGURATION_CMD: 5434 case IWM_TX_ANT_CONFIGURATION_CMD: 5435 case IWM_ADD_STA: 5436 case IWM_MAC_CONTEXT_CMD: 5437 case IWM_REPLY_SF_CFG_CMD: 5438 case IWM_POWER_TABLE_CMD: 5439 case IWM_LTR_CONFIG: 5440 case IWM_PHY_CONTEXT_CMD: 5441 case IWM_BINDING_CONTEXT_CMD: 5442 case IWM_TIME_EVENT_CMD: 5443 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD): 5444 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC): 5445 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC): 5446 case IWM_SCAN_OFFLOAD_REQUEST_CMD: 5447 case IWM_SCAN_OFFLOAD_ABORT_CMD: 5448 case IWM_REPLY_BEACON_FILTERING_CMD: 5449 case IWM_MAC_PM_POWER_TABLE: 5450 case IWM_TIME_QUOTA_CMD: 5451 case IWM_REMOVE_STA: 5452 case IWM_TXPATH_FLUSH: 5453 case IWM_LQ_CMD: 5454 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, 5455 IWM_FW_PAGING_BLOCK_CMD): 5456 
case IWM_BT_CONFIG: 5457 case IWM_REPLY_THERMAL_MNG_BACKOFF: 5458 cresp = (void *)pkt->data; 5459 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5460 memcpy(sc->sc_cmd_resp, 5461 pkt, sizeof(*pkt)+sizeof(*cresp)); 5462 } 5463 break; 5464 5465 /* ignore */ 5466 case IWM_PHY_DB_CMD: 5467 break; 5468 5469 case IWM_INIT_COMPLETE_NOTIF: 5470 break; 5471 5472 case IWM_SCAN_OFFLOAD_COMPLETE: 5473 iwm_rx_lmac_scan_complete_notif(sc, pkt); 5474 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5475 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5476 ieee80211_runtask(ic, &sc->sc_es_task); 5477 } 5478 break; 5479 5480 case IWM_SCAN_ITERATION_COMPLETE: { 5481 break; 5482 } 5483 5484 case IWM_SCAN_COMPLETE_UMAC: 5485 iwm_rx_umac_scan_complete_notif(sc, pkt); 5486 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5487 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5488 ieee80211_runtask(ic, &sc->sc_es_task); 5489 } 5490 break; 5491 5492 case IWM_SCAN_ITERATION_COMPLETE_UMAC: { 5493 #ifdef IWM_DEBUG 5494 struct iwm_umac_scan_iter_complete_notif *notif; 5495 notif = (void *)pkt->data; 5496 5497 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration " 5498 "complete, status=0x%x, %d channels scanned\n", 5499 notif->status, notif->scanned_channels); 5500 #endif 5501 break; 5502 } 5503 5504 case IWM_REPLY_ERROR: { 5505 struct iwm_error_resp *resp; 5506 resp = (void *)pkt->data; 5507 5508 device_printf(sc->sc_dev, 5509 "firmware error 0x%x, cmd 0x%x\n", 5510 le32toh(resp->error_type), 5511 resp->cmd_id); 5512 break; 5513 } 5514 5515 case IWM_TIME_EVENT_NOTIFICATION: 5516 iwm_rx_time_event_notif(sc, pkt); 5517 break; 5518 5519 /* 5520 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG 5521 * messages. Just ignore them for now. 5522 */ 5523 case IWM_DEBUG_LOG_MSG: 5524 break; 5525 5526 case IWM_MCAST_FILTER_CMD: 5527 break; 5528 5529 case IWM_SCD_QUEUE_CFG: { 5530 #ifdef IWM_DEBUG 5531 struct iwm_scd_txq_cfg_rsp *rsp; 5532 rsp = (void *)pkt->data; 5533 5534 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 5535 "queue cfg token=0x%x sta_id=%d " 5536 "tid=%d scd_queue=%d\n", 5537 rsp->token, rsp->sta_id, rsp->tid, 5538 rsp->scd_queue); 5539 #endif 5540 break; 5541 } 5542 5543 default: 5544 device_printf(sc->sc_dev, 5545 "code %x, frame %d/%d %x unhandled\n", 5546 code, qid & ~0x80, idx, pkt->len_n_flags); 5547 break; 5548 } 5549 5550 /* 5551 * Why test bit 0x80? The Linux driver: 5552 * 5553 * There is one exception: uCode sets bit 15 when it 5554 * originates the response/notification, i.e. when the 5555 * response/notification is not a direct response to a 5556 * command sent by the driver. For example, uCode issues 5557 * IWM_REPLY_RX when it sends a received frame to the driver; 5558 * it is not a direct response to any driver command. 5559 * 5560 * Ok, so since when is 7 == 15? Well, the Linux driver 5561 * uses a slightly different format for pkt->hdr, and "qid" 5562 * is actually the upper byte of a two-byte field. 5563 */ 5564 if (!(qid & (1 << 7))) 5565 iwm_cmd_done(sc, pkt); 5566 5567 offset = nextoff; 5568 } 5569 if (stolen) 5570 m_freem(m); 5571 #undef HAVEROOM 5572 } 5573 5574 /* 5575 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt. 
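 * The RX status page written back by the firmware (closed_rb_num) tells
 * us how far it has advanced through the ring; everything between
 * rxq.cur and that index is ready to be handled.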
5576 * Basic structure from if_iwn 5577 */ 5578 static void 5579 iwm_notif_intr(struct iwm_softc *sc) 5580 { 5581 int count; 5582 uint32_t wreg; 5583 uint16_t hw; 5584 5585 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 5586 BUS_DMASYNC_POSTREAD); 5587 5588 if (sc->cfg->mqrx_supported) { 5589 count = IWM_RX_MQ_RING_COUNT; 5590 wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG; 5591 } else { 5592 count = IWM_RX_LEGACY_RING_COUNT; 5593 wreg = IWM_FH_RSCSR_CHNL0_WPTR; 5594 } 5595 5596 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff; 5597 5598 /* 5599 * Process responses 5600 */ 5601 while (sc->rxq.cur != hw) { 5602 struct iwm_rx_ring *ring = &sc->rxq; 5603 struct iwm_rx_data *data = &ring->data[ring->cur]; 5604 5605 bus_dmamap_sync(ring->data_dmat, data->map, 5606 BUS_DMASYNC_POSTREAD); 5607 5608 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5609 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur); 5610 iwm_handle_rxb(sc, data->m); 5611 5612 ring->cur = (ring->cur + 1) % count; 5613 } 5614 5615 /* 5616 * Tell the firmware that it can reuse the ring entries that 5617 * we have just processed. 5618 * Seems like the hardware gets upset unless we align 5619 * the write by 8?? 5620 */ 5621 hw = (hw == 0) ? count - 1 : hw - 1; 5622 IWM_WRITE(sc, wreg, rounddown2(hw, 8)); 5623 } 5624 5625 static void 5626 iwm_intr(void *arg) 5627 { 5628 struct iwm_softc *sc = arg; 5629 int handled = 0; 5630 int r1, r2; 5631 int isperiodic = 0; 5632 5633 IWM_LOCK(sc); 5634 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0); 5635 5636 if (sc->sc_flags & IWM_FLAG_USE_ICT) { 5637 uint32_t *ict = sc->ict_dma.vaddr; 5638 int tmp; 5639 5640 tmp = htole32(ict[sc->ict_cur]); 5641 if (!tmp) 5642 goto out_ena; 5643 5644 /* 5645 * ok, there was something. keep plowing until we have all. 5646 */ 5647 r1 = r2 = 0; 5648 while (tmp) { 5649 r1 |= tmp; 5650 ict[sc->ict_cur] = 0; 5651 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT; 5652 tmp = htole32(ict[sc->ict_cur]); 5653 } 5654 5655 /* this is where the fun begins. don't ask */ 5656 if (r1 == 0xffffffff) 5657 r1 = 0; 5658 5659 /* i am not expected to understand this */ 5660 if (r1 & 0xc0000) 5661 r1 |= 0x8000; 5662 r1 = (0xff & r1) | ((0xff00 & r1) << 16); 5663 } else { 5664 r1 = IWM_READ(sc, IWM_CSR_INT); 5665 /* "hardware gone" (where, fishing?) */ 5666 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 5667 goto out; 5668 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS); 5669 } 5670 if (r1 == 0 && r2 == 0) { 5671 goto out_ena; 5672 } 5673 5674 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask); 5675 5676 /* Safely ignore these bits for debug checks below */ 5677 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD); 5678 5679 if (r1 & IWM_CSR_INT_BIT_SW_ERR) { 5680 int i; 5681 struct ieee80211com *ic = &sc->sc_ic; 5682 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5683 5684 #ifdef IWM_DEBUG 5685 iwm_nic_error(sc); 5686 #endif 5687 /* Dump driver status (TX and RX rings) while we're here. */ 5688 device_printf(sc->sc_dev, "driver status:\n"); 5689 for (i = 0; i < IWM_MAX_QUEUES; i++) { 5690 struct iwm_tx_ring *ring = &sc->txq[i]; 5691 device_printf(sc->sc_dev, 5692 " tx ring %2d: qid=%-2d cur=%-3d " 5693 "queued=%-3d\n", 5694 i, ring->qid, ring->cur, ring->queued); 5695 } 5696 device_printf(sc->sc_dev, 5697 " rx ring: cur=%d\n", sc->rxq.cur); 5698 device_printf(sc->sc_dev, 5699 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state); 5700 5701 /* Reset our firmware state tracking. 
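		 * A subsequent iwm_auth() will then re-add the MAC, PHY
		 * context, binding and station from scratch instead of
		 * issuing "changed" updates.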
*/ 5702 sc->sc_firmware_state = 0; 5703 /* Don't stop the device; just do a VAP restart */ 5704 IWM_UNLOCK(sc); 5705 5706 if (vap == NULL) { 5707 printf("%s: null vap\n", __func__); 5708 return; 5709 } 5710 5711 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " 5712 "restarting\n", __func__, vap->iv_state); 5713 5714 ieee80211_restart_all(ic); 5715 return; 5716 } 5717 5718 if (r1 & IWM_CSR_INT_BIT_HW_ERR) { 5719 handled |= IWM_CSR_INT_BIT_HW_ERR; 5720 device_printf(sc->sc_dev, "hardware error, stopping device\n"); 5721 iwm_stop(sc); 5722 goto out; 5723 } 5724 5725 /* firmware chunk loaded */ 5726 if (r1 & IWM_CSR_INT_BIT_FH_TX) { 5727 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK); 5728 handled |= IWM_CSR_INT_BIT_FH_TX; 5729 sc->sc_fw_chunk_done = 1; 5730 wakeup(&sc->sc_fw); 5731 } 5732 5733 if (r1 & IWM_CSR_INT_BIT_RF_KILL) { 5734 handled |= IWM_CSR_INT_BIT_RF_KILL; 5735 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); 5736 } 5737 5738 /* 5739 * The Linux driver uses periodic interrupts to avoid races. 5740 * We cargo-cult like it's going out of fashion. 5741 */ 5742 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) { 5743 handled |= IWM_CSR_INT_BIT_RX_PERIODIC; 5744 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC); 5745 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0) 5746 IWM_WRITE_1(sc, 5747 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS); 5748 isperiodic = 1; 5749 } 5750 5751 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) { 5752 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX); 5753 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK); 5754 5755 iwm_notif_intr(sc); 5756 5757 /* enable periodic interrupt, see above */ 5758 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic) 5759 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, 5760 IWM_CSR_INT_PERIODIC_ENA); 5761 } 5762 5763 if (__predict_false(r1 & ~handled)) 5764 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5765 "%s: unhandled interrupts: %x\n", __func__, r1); 5766 out_ena: 5767 iwm_restore_interrupts(sc); 5768 out: 5769 IWM_UNLOCK(sc); 5770 return; 5771 } 5772 5773 /* 5774 * Autoconf glue-sniffing 5775 */ 5776 #define PCI_VENDOR_INTEL 0x8086 5777 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3 5778 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4 5779 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165 5780 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166 5781 #define PCI_PRODUCT_INTEL_WL_3168_1 0x24fb 5782 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1 5783 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2 5784 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a 5785 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b 5786 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3 5787 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4 5788 #define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd 5789 #define PCI_PRODUCT_INTEL_WL_9560_1 0x9df0 5790 #define PCI_PRODUCT_INTEL_WL_9560_2 0xa370 5791 #define PCI_PRODUCT_INTEL_WL_9560_3 0x31dc 5792 #define PCI_PRODUCT_INTEL_WL_9260_1 0x2526 5793 5794 static const struct iwm_devices { 5795 uint16_t device; 5796 const struct iwm_cfg *cfg; 5797 } iwm_devices[] = { 5798 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg }, 5799 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg }, 5800 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg }, 5801 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg }, 5802 { PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg }, 5803 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg }, 5804 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg }, 5805 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg }, 5806 { 
static int
iwm_probe(device_t dev)
{
	int i;

	for (i = 0; i < nitems(iwm_devices); i++) {
		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
		    pci_get_device(dev) == iwm_devices[i].device) {
			device_set_desc(dev, iwm_devices[i].cfg->name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
iwm_dev_check(device_t dev)
{
	struct iwm_softc *sc;
	uint16_t devid;
	int i;

	sc = device_get_softc(dev);

	devid = pci_get_device(dev);
	for (i = 0; i < nitems(iwm_devices); i++) {
		if (iwm_devices[i].device == devid) {
			sc->cfg = iwm_devices[i].cfg;
			return (0);
		}
	}
	device_printf(dev, "unknown adapter type\n");
	return (ENXIO);
}

/* PCI registers */
#define	PCI_CFG_RETRY_TIMEOUT	0x041

static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;

	sc = device_get_softc(dev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (error);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
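
/*
 * Release the interrupt handler and PCI resources.  Each resource is
 * checked before release, so this is also safe to reach from attach
 * failure paths where only part of the setup completed.
 */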
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
}
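
/*
 * Device attach: initialize locks, task queues and callouts, identify
 * the adapter, map PCI resources, and allocate the DMA memory the
 * device needs (firmware transfer area, "keep warm" page, ICT table,
 * TX scheduler rings, TX/RX rings).  Running the init firmware and
 * attaching net80211 are deferred to iwm_preinit() via a config
 * intrhook.
 */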
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);

	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		goto fail;
	}

	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (no more
	 * "dash" value).  To keep hw_rev backwards-compatible, we store it
	 * in the old format.
	 */
	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
	    IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return (0);

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return (ENXIO);
}
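
/*
 * Reject group addresses (low bit of the first octet set) and the
 * all-zeroes address; anything else is acceptable as a station MAC.
 */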
static int
iwm_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
		return (FALSE);

	return (TRUE);
}

static int
iwm_wme_update(struct ieee80211com *ic)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwm_softc *sc = ic->ic_softc;
	struct chanAccParams chp;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct iwm_node *in;
	struct wmeParams tmp[WME_NUM_AC];
	int aci, error;

	if (vap == NULL)
		return (0);

	ieee80211_wme_ic_getparams(ic, &chp);

	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++)
		tmp[aci] = chp.cap_wmeParams[aci];
	IEEE80211_UNLOCK(ic);

	IWM_LOCK(sc);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac = &tmp[aci];
		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
		ivp->queue_params[aci].edca_txop =
		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
	}
	ivp->have_wme = TRUE;
	if (ivp->is_uploaded && vap->iv_bss != NULL) {
		in = IWM_NODE(vap->iv_bss);
		if (in->in_assoc) {
			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update MAC\n", __func__);
			}
		}
	}
	IWM_UNLOCK(sc);

	return (0);
#undef IWM_EXP2
}
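
/*
 * Deferred first-time initialization, run from a config intrhook once
 * interrupts work: bring the hardware up, run the init firmware to
 * obtain NVM and calibration data, then attach net80211 using the
 * capabilities that were discovered.
 */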
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	error = iwm_run_init_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
}
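
/*
 * net80211 VAP glue.  Only a single VAP is supported; iwm_vap_create()
 * saves net80211's iv_newstate and interposes iwm_newstate so the
 * driver sees all state transitions first.
 */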
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return (NULL);
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ivp->id = IWM_DEFAULT_MACID;
	ivp->color = IWM_DEFAULT_COLOR;

	ivp->have_wme = FALSE;
	ivp->ps_disabled = FALSE;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);
	ic->ic_opmode = opmode;

	return (vap);
}

static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

static void
iwm_xmit_queue_drain(struct iwm_softc *sc)
{
	struct mbuf *m;
	struct ieee80211_node *ni;

	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		ieee80211_free_node(ni);
		m_freem(m);
	}
}

static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_umac_scan(sc);
	else
		error = iwm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}

static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
		 * both iwm_scan_end and iwm_scan_start run in the
		 * ic->ic_tq taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}

static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

/*
 * Channel and scan-dwell callbacks are no-ops: scanning is fully
 * offloaded to the firmware (see IEEE80211_FEXT_SCAN_OFFLOAD above).
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}

void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
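
/*
 * Suspend/resume glue.  iwm_suspend() reuses IWM_FLAG_SCANNING as a
 * "was running" marker, which iwm_resume() checks to decide whether
 * net80211 should be restarted via ieee80211_resume_all().
 */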
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	if (!sc->sc_attached)
		return (0);

	iwm_init_task(sc);

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return (0);
}

static int
iwm_suspend(device_t dev)
{
	int do_stop = 0;
	struct iwm_softc *sc = device_get_softc(dev);

	do_stop = !!(sc->sc_ic.ic_nrunning > 0);

	if (!sc->sc_attached)
		return (0);

	ieee80211_suspend_all(&sc->sc_ic);

	if (do_stop) {
		IWM_LOCK(sc);
		iwm_stop(sc);
		sc->sc_flags |= IWM_FLAG_SCANNING;
		IWM_UNLOCK(sc);
	}

	return (0);
}

static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return (0);
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	iwm_stop_device(sc);
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}

static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}

static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

DRIVER_MODULE(iwm, pci, iwm_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
    iwm_devices, nitems(iwm_devices));
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);