/*-
 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
 */

/*	$OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $	*/

/*
 * Copyright (c) 2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2024 Future Crew, LLC
 * Author: Mikhail Pchelin <misha@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 * Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/epoch.h>
#include <sys/kdb.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_vht.h>

int iwx_himark = 224;
int iwx_lomark = 192;

#define IWX_FBSD_RSP_V3	3
#define IWX_FBSD_RSP_V4	4

#define DEVNAME(_sc)	(device_get_nameunit((_sc)->sc_dev))
#define IC2IFP(ic)	(((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#include <dev/iwx/if_iwxreg.h>
#include <dev/iwx/if_iwxvar.h>

#include <dev/iwx/if_iwx_debug.h>

#define PCI_CFG_RETRY_TIMEOUT	0x41

#define PCI_VENDOR_INTEL		0x8086
#define PCI_PRODUCT_INTEL_WL_22500_1	0x2723	/* Wi-Fi 6 AX200 */
#define PCI_PRODUCT_INTEL_WL_22500_2	0x02f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_3	0xa0f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_4	0x34f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_5	0x06f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_6	0x43f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_7	0x3df0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_8	0x4df0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_9	0x2725	/* Wi-Fi 6 AX210 */
#define PCI_PRODUCT_INTEL_WL_22500_10	0x2726	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_11	0x51f0	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_12	0x7a70	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_13	0x7af0	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_14	0x7e40	/* Wi-Fi 6 AX210 */
#define PCI_PRODUCT_INTEL_WL_22500_15	0x7f70	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_16	0x54f0	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_17	0x51f1	/* Wi-Fi 6 AX211 */

static const struct iwx_devices {
	uint16_t	device;
	char		*name;
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1,		"Wi-Fi 6 AX200" },
	{ PCI_PRODUCT_INTEL_WL_22500_2,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_3,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_4,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_5,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_6,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_7,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_8,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_9,		"Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_10,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_11,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_12,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_13,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_14,	"Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_15,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_16,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_17,	"Wi-Fi 6 AX211" },
};

static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37

const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_)	((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_)	((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_)	((_i_) >= 12 && (_i_) != 22)
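
/*
 * Editorial note: the 'rate' field in iwx_rates[] is in net80211's usual
 * units of 500 kb/s, so { 2, ... } is the 1 Mbit/s CCK rate and
 * { 108, ... } is the 54 Mbit/s OFDM rate.  IWX_RVAL_IS_OFDM() follows
 * the same encoding: OFDM rate values start at 12 (6 Mbit/s), while
 * 22 (11 Mbit/s) is CCK and therefore excluded.
 */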
/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
#if 0
static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
#endif
static int iwx_apply_debug_destination(struct iwx_softc *);
static void iwx_set_ltr(struct iwx_softc *);
static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
    const struct iwx_fw_sects *);
static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
static void iwx_ctxt_info_free_paging(struct iwx_softc *);
static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
    struct iwx_context_info_dram *);
static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
    const uint8_t *, size_t);
static int iwx_set_default_calib(struct iwx_softc *, const void *);
static void iwx_fw_info_free(struct iwx_fw_info *);
static int iwx_read_firmware(struct iwx_softc *);
static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
static int iwx_nic_lock(struct iwx_softc *);
static void iwx_nic_assert_locked(struct iwx_softc *);
static void iwx_nic_unlock(struct iwx_softc *);
static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
    uint32_t);
static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
    bus_size_t, bus_size_t);
static void iwx_dma_contig_free(struct iwx_dma_info *);
static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
static void iwx_disable_rx_dma(struct iwx_softc *);
static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
static void iwx_enable_rfkill_int(struct iwx_softc *);
static int iwx_check_rfkill(struct iwx_softc *);
static void iwx_enable_interrupts(struct iwx_softc *);
static void iwx_enable_fwload_interrupt(struct iwx_softc *);
#if 0
static void iwx_restore_interrupts(struct iwx_softc *);
#endif
static void iwx_disable_interrupts(struct iwx_softc *);
static void iwx_ict_reset(struct iwx_softc *);
static int iwx_set_hw_ready(struct iwx_softc *);
static int iwx_prepare_card_hw(struct iwx_softc *);
static int iwx_force_power_gating(struct iwx_softc *);
static void iwx_apm_config(struct iwx_softc *);
static int iwx_apm_init(struct iwx_softc *);
static void iwx_apm_stop(struct iwx_softc *);
static int iwx_allow_mcast(struct iwx_softc *);
static void iwx_init_msix_hw(struct iwx_softc *);
static void iwx_conf_msix_hw(struct iwx_softc *, int);
static int iwx_clear_persistence_bit(struct iwx_softc *);
static int iwx_start_hw(struct iwx_softc *);
static void iwx_stop_device(struct iwx_softc *);
static void iwx_nic_config(struct iwx_softc *);
static int iwx_nic_rx_init(struct iwx_softc *);
static int iwx_nic_init(struct iwx_softc *);
static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
static void iwx_post_alive(struct iwx_softc *);
static int iwx_schedule_session_protection(struct iwx_softc *,
    struct iwx_node *, uint32_t);
static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
static void iwx_init_channel_map(struct ieee80211com *, int, int *,
    struct ieee80211_channel[]);
static int iwx_mimo_enabled(struct iwx_softc *);
static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
    uint16_t);
static void iwx_clear_reorder_buffer(struct iwx_softc *,
    struct iwx_rxba_data *);
static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *,
    uint8_t, uint16_t, uint16_t, int, int);
static void iwx_sta_tx_agg_start(struct iwx_softc *,
    struct ieee80211_node *, uint8_t);
static void iwx_ba_rx_task(void *, int);
static void iwx_ba_tx_task(void *, int);
static void iwx_set_mac_addr_from_csr(struct iwx_softc *,
    struct iwx_nvm_data *);
static int iwx_is_valid_mac_addr(const uint8_t *);
static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
static int iwx_nvm_get(struct iwx_softc *);
static int iwx_load_firmware(struct iwx_softc *);
static int iwx_start_fw(struct iwx_softc *);
static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *,
    size_t);
static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
static int iwx_load_pnvm(struct iwx_softc *);
static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
static int iwx_load_ucode_wait_alive(struct iwx_softc *);
static int iwx_send_dqa_cmd(struct iwx_softc *);
static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
static int iwx_config_ltr(struct iwx_softc *);
static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int,
    bus_dma_segment_t *);
static int iwx_rx_addbuf(struct iwx_softc *, int, int);
static int iwx_rxmq_get_signal_strength(struct iwx_softc *,
    struct iwx_rx_mpdu_desc *);
static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
    struct iwx_rx_data *);
static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
#if 0
int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
    struct ieee80211_node *, struct ieee80211_rxinfo *);
#endif
static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
    int, int, uint32_t, uint8_t);
static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
    struct iwx_tx_data *);
static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *,
    uint16_t);
static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
    struct iwx_rx_data *);
static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
    struct iwx_rx_data *);
static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *,
    struct ieee80211_channel *);
static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
    struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
#if 0
static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
#endif
static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
    uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
    const void *);
static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
    uint32_t *);
static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
    const void *, uint32_t *);
static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
static void iwx_cmd_done(struct iwx_softc *, int, int, int);
static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
static uint32_t iwx_fw_rateidx_cck(uint8_t);
static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
    struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
    struct mbuf *);
static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *,
    int, uint16_t, uint16_t);
static int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
    struct iwx_beacon_filter_cmd *);
static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
    int);
static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
    struct iwx_mac_power_cmd *);
static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
static int iwx_power_update_device(struct iwx_softc *);
#if 0
static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
#endif
static int iwx_disable_beacon_filter(struct iwx_softc *);
static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
static int iwx_fill_probe_req(struct iwx_softc *,
    struct iwx_scan_probe_req *);
static int iwx_config_umac_scan_reduced(struct iwx_softc *);
static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
    struct iwx_scan_general_params_v10 *, int);
static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
    struct iwx_scan_general_params_v10 *, uint16_t, int);
static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
    struct iwx_scan_channel_params_v6 *, uint32_t, int);
static int iwx_umac_scan_v14(struct iwx_softc *, int);
static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
static int iwx_rval2ridx(int);
static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
    int *);
static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
    struct iwx_mac_ctx_cmd *, uint32_t);
static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
    struct iwx_mac_data_sta *, int);
static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
    uint32_t, int);
static int iwx_clear_statistics(struct iwx_softc *);
static int iwx_scan(struct iwx_softc *);
static int iwx_bgscan(struct ieee80211com *);
static int iwx_enable_mgmt_queue(struct iwx_softc *);
static int iwx_disable_mgmt_queue(struct iwx_softc *);
static int iwx_rs_rval2idx(uint8_t);
static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
    int);
static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *,
    int);
static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
    uint8_t, uint8_t);
static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
    uint8_t);
static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
static int iwx_deauth(struct iwx_softc *);
static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
static int iwx_run_stop(struct iwx_softc *);
static struct ieee80211_node *iwx_node_alloc(struct ieee80211vap *,
    const uint8_t[IEEE80211_ADDR_LEN]);
#if 0
int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
    struct ieee80211_key *);
void iwx_setkey_task(void *);
void iwx_delete_key(struct ieee80211com *,
    struct ieee80211_node *, struct ieee80211_key *);
#endif
static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwx_endscan(struct iwx_softc *);
static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
    struct ieee80211_node *);
static int iwx_sf_config(struct iwx_softc *, int);
static int iwx_send_bt_init_conf(struct iwx_softc *);
static int iwx_send_soc_conf(struct iwx_softc *);
static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
static int iwx_init_hw(struct iwx_softc *);
static int iwx_init(struct iwx_softc *);
static void iwx_stop(struct iwx_softc *);
static void iwx_watchdog(void *);
static const char *iwx_desc_lookup(uint32_t);
static void iwx_nic_error(struct iwx_softc *);
static void iwx_dump_driver_status(struct iwx_softc *);
static void iwx_nic_umac_error(struct iwx_softc *);
static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
    struct mbuf *);
static void iwx_notif_intr(struct iwx_softc *);
#if 0
/* XXX-THJ - I don't have hardware for this */
static int iwx_intr(void *);
#endif
static void iwx_intr_msix(void *);
static int iwx_preinit(struct iwx_softc *);
static void iwx_attach_hook(void *);
static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
static int iwx_probe(device_t);
static int iwx_attach(device_t);
static int iwx_detach(device_t);

/* FreeBSD specific glue */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

#if IWX_DEBUG
#define DPRINTF(x)	do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
#else
#define DPRINTF(x)	do { ; } while (0)
#endif

/* FreeBSD specific functions */
static struct ieee80211vap *iwx_vap_create(struct ieee80211com *,
    const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
    const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
static void iwx_vap_delete(struct ieee80211vap *);
static void iwx_parent(struct ieee80211com *);
static void iwx_scan_start(struct ieee80211com *);
static void iwx_scan_end(struct ieee80211com *);
static void iwx_update_mcast(struct ieee80211com *ic);
static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwx_scan_mindwell(struct ieee80211_scan_state *);
static void iwx_set_channel(struct ieee80211com *);
static void iwx_endscan_cb(void *, int);
static int iwx_wme_update(struct ieee80211com *);
static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
    const struct ieee80211_bpf_params *);
static int iwx_transmit(struct ieee80211com *, struct mbuf *);
static void iwx_start(struct iwx_softc *);
static int iwx_ampdu_rx_start(struct ieee80211_node *,
    struct ieee80211_rx_ampdu *, int, int, int);
static void iwx_ampdu_rx_stop(struct ieee80211_node *,
    struct ieee80211_rx_ampdu *);
static int iwx_addba_request(struct ieee80211_node *,
    struct ieee80211_tx_ampdu *, int, int, int);
static int iwx_addba_response(struct ieee80211_node *,
    struct ieee80211_tx_ampdu *, int, int, int);
static void iwx_key_update_begin(struct ieee80211vap *);
static void iwx_key_update_end(struct ieee80211vap *);
static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
    ieee80211_keyix *, ieee80211_keyix *);
static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
static int iwx_key_delete(struct ieee80211vap *,
    const struct ieee80211_key *);
static int iwx_suspend(device_t);
static int iwx_resume(device_t);
static void iwx_radiotap_attach(struct iwx_softc *);

/* OpenBSD compat defines */
#define IEEE80211_HTOP0_SCO_SCN		0
#define IEEE80211_VHTOP0_CHAN_WIDTH_HT	0
#define IEEE80211_VHTOP0_CHAN_WIDTH_80	1

#define IEEE80211_HT_RATESET_SISO	0
#define IEEE80211_HT_RATESET_MIMO2	2

const struct ieee80211_rateset ieee80211_std_rateset_11a =
    { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
    { 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
    { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };

inline int
ieee80211_has_addr4(const struct ieee80211_frame *wh)
{
	return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
	    IEEE80211_FC1_DIR_DSTODS;
}

static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

static uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

static int
iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwx_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

static void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF);
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

static int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	    fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	    fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
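
/*
 * Editorial sketch (derived from the code below) of the firmware image
 * layout that iwx_init_fw_sec() walks with iwx_get_num_sections():
 *
 *   [ lmac sections | CPU1_CPU2 separator | umac sections |
 *     PAGING separator | paging sections ]
 *
 * The separator entries explain the "+1" and "+2" offsets used when
 * indexing fws->fw_sect[] for the umac and paging sections.
 */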
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	IWX_UNLOCK(sc);
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] = htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] = htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory is stored separately from dram->fw, unlike the
	 * umac and lmac sections, because its release timing differs:
	 * fw memory can be released upon "alive", but paging memory may
	 * only be freed once the device goes down.
	 * Consequently the fw image is accessed a bit differently here:
	 * fw_cnt no longer changes, so the loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
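
/*
 * Editorial example for iwx_fw_version_str() below, using hypothetical
 * inputs: major 77, minor 0x20fceb4d, api 0 formats as "77.20fceb4d.0"
 * (the minor is printed in hex for major >= 35), whereas major 34,
 * minor 3, api 1 formats as "34.3.1".
 */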
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

#if 0
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif

static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}
monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}

static void
iwx_set_ltr(struct iwx_softc *sc)
{
	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

	/*
	 * To work around hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if (!sc->sc_integrated) {
		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (sc->sc_integrated &&
	    sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
		    IWX_HPM_MAC_LRT_ENABLE_ALL);
		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
	}
}
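
/*
 * Editorial note: two firmware boot paths follow.  iwx_ctxt_info_init()
 * keeps all boot parameters in a single context info structure, while
 * iwx_ctxt_info_gen3_init() additionally requires an image loader (IML)
 * from the firmware file and moves most parameters into the "prph
 * scratch" area referenced from the gen3 context info.
 */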
static int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}

static void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF);
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

static int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    const uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
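
/*
 * Editorial sketch of the firmware file format consumed by
 * iwx_read_firmware() below: a struct iwx_tlv_ucode_header (whose first
 * 32-bit word must be zero and whose magic must be IWX_TLV_UCODE_MAGIC),
 * followed by a stream of TLV records, each a struct iwx_ucode_tlv
 * header plus 'length' bytes of data, padded to a 4-byte boundary.
 */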

#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

static int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	free(fw->iml, M_DEVBUF);
	fw->iml = NULL;
	fw->iml_len = 0;
}

#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config =
			    le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;
break; 1545 1546 case IWX_UCODE_TLV_N_SCAN_CHANNELS: 1547 if (tlv_len != sizeof(uint32_t)) { 1548 err = EINVAL; 1549 goto parse_out; 1550 } 1551 sc->sc_capa_n_scan_channels = 1552 le32toh(*(const uint32_t *)tlv_data); 1553 if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) { 1554 err = ERANGE; 1555 goto parse_out; 1556 } 1557 break; 1558 1559 case IWX_UCODE_TLV_FW_VERSION: 1560 if (tlv_len != sizeof(uint32_t) * 3) { 1561 err = EINVAL; 1562 goto parse_out; 1563 } 1564 1565 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver), 1566 le32toh(((const uint32_t *)tlv_data)[0]), 1567 le32toh(((const uint32_t *)tlv_data)[1]), 1568 le32toh(((const uint32_t *)tlv_data)[2])); 1569 break; 1570 1571 case IWX_UCODE_TLV_FW_DBG_DEST: { 1572 const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; 1573 1574 fw->dbg_dest_ver = (const uint8_t *)tlv_data; 1575 if (*fw->dbg_dest_ver != 0) { 1576 err = EINVAL; 1577 goto parse_out; 1578 } 1579 1580 if (fw->dbg_dest_tlv_init) 1581 break; 1582 fw->dbg_dest_tlv_init = true; 1583 1584 dest_v1 = (const void *)tlv_data; 1585 fw->dbg_dest_tlv_v1 = dest_v1; 1586 fw->n_dest_reg = tlv_len - 1587 offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops); 1588 fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]); 1589 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 1590 "%s: found debug dest; n_dest_reg=%d\n", 1591 __func__, fw->n_dest_reg); 1592 break; 1593 } 1594 1595 case IWX_UCODE_TLV_FW_DBG_CONF: { 1596 const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data; 1597 1598 if (!fw->dbg_dest_tlv_init || 1599 conf->id >= nitems(fw->dbg_conf_tlv) || 1600 fw->dbg_conf_tlv[conf->id] != NULL) 1601 break; 1602 1603 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 1604 "Found debug configuration: %d\n", conf->id); 1605 fw->dbg_conf_tlv[conf->id] = conf; 1606 fw->dbg_conf_tlv_len[conf->id] = tlv_len; 1607 break; 1608 } 1609 1610 case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: { 1611 const struct iwx_umac_debug_addrs *dbg_ptrs = 1612 (const void *)tlv_data; 1613 1614 if (tlv_len != sizeof(*dbg_ptrs)) { 1615 err = EINVAL; 1616 goto parse_out; 1617 } 1618 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000) 1619 break; 1620 sc->sc_uc.uc_umac_error_event_table = 1621 le32toh(dbg_ptrs->error_info_addr) & 1622 ~IWX_FW_ADDR_CACHE_CONTROL; 1623 sc->sc_uc.error_event_table_tlv_status |= 1624 IWX_ERROR_EVENT_TABLE_UMAC; 1625 break; 1626 } 1627 1628 case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: { 1629 const struct iwx_lmac_debug_addrs *dbg_ptrs = 1630 (const void *)tlv_data; 1631 1632 if (tlv_len != sizeof(*dbg_ptrs)) { 1633 err = EINVAL; 1634 goto parse_out; 1635 } 1636 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000) 1637 break; 1638 sc->sc_uc.uc_lmac_error_event_table[0] = 1639 le32toh(dbg_ptrs->error_event_table_ptr) & 1640 ~IWX_FW_ADDR_CACHE_CONTROL; 1641 sc->sc_uc.error_event_table_tlv_status |= 1642 IWX_ERROR_EVENT_TABLE_LMAC1; 1643 break; 1644 } 1645 1646 case IWX_UCODE_TLV_FW_MEM_SEG: 1647 break; 1648 1649 case IWX_UCODE_TLV_IML: 1650 if (sc->sc_fw.iml != NULL) { 1651 free(fw->iml, M_DEVBUF); 1652 fw->iml_len = 0; 1653 } 1654 sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF, 1655 M_WAITOK | M_ZERO); 1656 if (sc->sc_fw.iml == NULL) { 1657 err = ENOMEM; 1658 goto parse_out; 1659 } 1660 memcpy(sc->sc_fw.iml, tlv_data, tlv_len); 1661 sc->sc_fw.iml_len = tlv_len; 1662 break; 1663 1664 case IWX_UCODE_TLV_CMD_VERSIONS: 1665 if (tlv_len % sizeof(struct iwx_fw_cmd_version)) { 1666 tlv_len /= sizeof(struct iwx_fw_cmd_version); 1667 tlv_len *= sizeof(struct iwx_fw_cmd_version); 1668 } 1669 if (sc->n_cmd_versions != 0) { 1670 err = EINVAL; 
1671 goto parse_out; 1672 } 1673 if (tlv_len > sizeof(sc->cmd_versions)) { 1674 err = EINVAL; 1675 goto parse_out; 1676 } 1677 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len); 1678 sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version); 1679 break; 1680 1681 case IWX_UCODE_TLV_FW_RECOVERY_INFO: 1682 break; 1683 1684 case IWX_UCODE_TLV_FW_FSEQ_VERSION: 1685 case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION: 1686 case IWX_UCODE_TLV_FW_NUM_STATIONS: 1687 case IWX_UCODE_TLV_FW_NUM_BEACONS: 1688 break; 1689 1690 /* undocumented TLVs found in iwx-cc-a0-46 image */ 1691 case 58: 1692 case 0x1000003: 1693 case 0x1000004: 1694 break; 1695 1696 /* undocumented TLVs found in iwx-cc-a0-48 image */ 1697 case 0x1000000: 1698 case 0x1000002: 1699 break; 1700 1701 case IWX_UCODE_TLV_TYPE_DEBUG_INFO: 1702 case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION: 1703 case IWX_UCODE_TLV_TYPE_HCMD: 1704 case IWX_UCODE_TLV_TYPE_REGIONS: 1705 case IWX_UCODE_TLV_TYPE_TRIGGERS: 1706 case IWX_UCODE_TLV_TYPE_CONF_SET: 1707 case IWX_UCODE_TLV_SEC_TABLE_ADDR: 1708 case IWX_UCODE_TLV_D3_KEK_KCK_ADDR: 1709 case IWX_UCODE_TLV_CURRENT_PC: 1710 break; 1711 1712 /* undocumented TLV found in iwx-cc-a0-67 image */ 1713 case 0x100000b: 1714 break; 1715 1716 /* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */ 1717 case 0x101: 1718 break; 1719 1720 /* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */ 1721 case 0x100000c: 1722 break; 1723 1724 /* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */ 1725 case 69: 1726 break; 1727 1728 default: 1729 err = EINVAL; 1730 goto parse_out; 1731 } 1732 1733 /* 1734 * Check for size_t overflow and ignore missing padding at 1735 * end of firmware file. 1736 */ 1737 if (roundup(tlv_len, 4) > len) 1738 break; 1739 1740 len -= roundup(tlv_len, 4); 1741 data += roundup(tlv_len, 4); 1742 } 1743 1744 KASSERT(err == 0, ("unhandled fw parse error")); 1745 1746 parse_out: 1747 if (err) { 1748 printf("%s: firmware parse error %d, " 1749 "section type %d\n", DEVNAME(sc), err, tlv_type); 1750 } 1751 1752 out: 1753 if (err) { 1754 fw->fw_status = IWX_FW_STATUS_NONE; 1755 if (fw->fw_rawdata != NULL) 1756 iwx_fw_info_free(fw); 1757 } else 1758 fw->fw_status = IWX_FW_STATUS_DONE; 1759 return err; 1760 } 1761 1762 static uint32_t 1763 iwx_prph_addr_mask(struct iwx_softc *sc) 1764 { 1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 1766 return 0x00ffffff; 1767 else 1768 return 0x000fffff; 1769 } 1770 1771 static uint32_t 1772 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr) 1773 { 1774 uint32_t mask = iwx_prph_addr_mask(sc); 1775 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24))); 1776 IWX_BARRIER_READ_WRITE(sc); 1777 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT); 1778 } 1779 1780 uint32_t 1781 iwx_read_prph(struct iwx_softc *sc, uint32_t addr) 1782 { 1783 iwx_nic_assert_locked(sc); 1784 return iwx_read_prph_unlocked(sc, addr); 1785 } 1786 1787 static void 1788 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val) 1789 { 1790 uint32_t mask = iwx_prph_addr_mask(sc); 1791 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24))); 1792 IWX_BARRIER_WRITE(sc); 1793 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val); 1794 } 1795 1796 static void 1797 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val) 1798 { 1799 iwx_nic_assert_locked(sc); 1800 iwx_write_prph_unlocked(sc, addr, val); 1801 } 1802 1803 static uint32_t 1804 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr) 1805 { 1806 return iwx_read_prph(sc, addr + 
sc->sc_umac_prph_offset); 1807 } 1808 1809 static void 1810 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val) 1811 { 1812 iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val); 1813 } 1814 1815 static int 1816 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords) 1817 { 1818 int offs, err = 0; 1819 uint32_t *vals = buf; 1820 1821 if (iwx_nic_lock(sc)) { 1822 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr); 1823 for (offs = 0; offs < dwords; offs++) 1824 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT)); 1825 iwx_nic_unlock(sc); 1826 } else { 1827 err = EBUSY; 1828 } 1829 return err; 1830 } 1831 1832 static int 1833 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask, 1834 int timo) 1835 { 1836 for (;;) { 1837 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) { 1838 return 1; 1839 } 1840 if (timo < 10) { 1841 return 0; 1842 } 1843 timo -= 10; 1844 DELAY(10); 1845 } 1846 } 1847 1848 static int 1849 iwx_nic_lock(struct iwx_softc *sc) 1850 { 1851 if (sc->sc_nic_locks > 0) { 1852 iwx_nic_assert_locked(sc); 1853 sc->sc_nic_locks++; 1854 return 1; /* already locked */ 1855 } 1856 1857 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, 1858 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1859 1860 DELAY(2); 1861 1862 if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL, 1863 IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1864 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY 1865 | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) { 1866 sc->sc_nic_locks++; 1867 return 1; 1868 } 1869 1870 printf("%s: acquiring device failed\n", DEVNAME(sc)); 1871 return 0; 1872 } 1873 1874 static void 1875 iwx_nic_assert_locked(struct iwx_softc *sc) 1876 { 1877 if (sc->sc_nic_locks <= 0) 1878 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks); 1879 } 1880 1881 static void 1882 iwx_nic_unlock(struct iwx_softc *sc) 1883 { 1884 if (sc->sc_nic_locks > 0) { 1885 if (--sc->sc_nic_locks == 0) 1886 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL, 1887 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1888 } else 1889 printf("%s: NIC already unlocked\n", DEVNAME(sc)); 1890 } 1891 1892 static int 1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits, 1894 uint32_t mask) 1895 { 1896 uint32_t val; 1897 1898 if (iwx_nic_lock(sc)) { 1899 val = iwx_read_prph(sc, reg) & mask; 1900 val |= bits; 1901 iwx_write_prph(sc, reg, val); 1902 iwx_nic_unlock(sc); 1903 return 0; 1904 } 1905 return EBUSY; 1906 } 1907 1908 static int 1909 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits) 1910 { 1911 return iwx_set_bits_mask_prph(sc, reg, bits, ~0); 1912 } 1913 1914 static int 1915 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits) 1916 { 1917 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits); 1918 } 1919 1920 static void 1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1922 { 1923 if (error != 0) 1924 return; 1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1926 *(bus_addr_t *)arg = segs[0].ds_addr; 1927 } 1928 1929 static int 1930 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma, 1931 bus_size_t size, bus_size_t alignment) 1932 { 1933 int error; 1934 1935 dma->tag = NULL; 1936 dma->map = NULL; 1937 dma->size = size; 1938 dma->vaddr = NULL; 1939 1940 error = bus_dma_tag_create(tag, alignment, 1941 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1942 1, size, 0, NULL, NULL, &dma->tag); 1943 if (error != 0) 1944 goto fail; 1945 1946 error = bus_dmamem_alloc(dma->tag, (void 
**)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwx_dma_contig_free(dma);
	return error;
}

static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

static int
iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		size = sizeof(struct iwx_rx_transfer_desc);
	else
		size = sizeof(uint64_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
	    size * IWX_RX_MQ_RING_COUNT, 256);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		size = sizeof(uint16_t);
	else
		size = sizeof(*ring->stat);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		size = sizeof(struct iwx_rx_completion_desc);
	else
		size = sizeof(uint32_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
	    size * IWX_RX_MQ_RING_COUNT, 256);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not allocate RX used descriptor DMA memory\n");
		goto fail;
	}

	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
	    0, NULL, NULL, &ring->data_dmat);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not create RX buf DMA tag\n");
		goto fail;
	}

	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (err) {
			device_printf(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:
	iwx_free_rx_ring(sc, ring);
	return err;
}

static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) &
				    IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}

static void
iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
	    BUS_DMASYNC_PREWRITE);
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = ring->stat_dma.vaddr;
		*status = 0;
	} else
		memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
	    BUS_DMASYNC_POSTWRITE);
}

static void
iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	int i;

	iwx_dma_contig_free(&ring->free_desc_dma);
	iwx_dma_contig_free(&ring->stat_dma);
	iwx_dma_contig_free(&ring->used_desc_dma);

	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];
		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;
	size_t bc_tbl_size;
	bus_size_t bc_align;
	size_t mapsize;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->cur_hw = 0;
	ring->tail = 0;
	ring->tail_hw = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * The hardware supports up to 512 Tx rings, which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 1 default queue for
	 * management, control, and non-QoS data frames.
	 * The command queue is sc->txq[0]; our default queue is sc->txq[1].
	 *
	 * Tx aggregation requires additional queues, one queue per TID for
	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
	 * Firmware may assign its own internal IDs for these queues
	 * depending on which TID gets aggregation enabled first.
	 * The driver maintains a table mapping driver-side queue IDs
	 * to firmware-side queue IDs.
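	 *
	 * That mapping lives in sc->aggqid[]; see iwx_sta_tx_agg_start().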
	 */

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
		bc_align = 128;
	} else {
		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
		bc_align = 64;
	}
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
	    bc_align);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not allocate byte count table DMA memory\n");
		goto fail;
	}

	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
	    IWX_FIRST_TB_SIZE_ALIGN);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not allocate cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWX_DQA_CMD_QUEUE)
		mapsize = (sizeof(struct iwx_cmd_header) +
		    IWX_MAX_CMD_PAYLOAD_SIZE);
	else
		mapsize = MCLBYTES;
	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
	    mapsize, 0, NULL, NULL, &ring->data_dmat);
	if (err) {
		device_printf(sc->sc_dev,
		    "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		paddr += sizeof(struct iwx_device_cmd);

		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (err) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("bad paddr in txr alloc"));
	return 0;

fail:
	/* Release any partially allocated ring resources. */
	iwx_free_tx_ring(sc, ring);
	return err;
}

static void
iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	/* Clear byte count table. */
	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);

	/* Clear TX descriptors.
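	 * The PREWRITE sync below makes the zeroed TFDs visible to the
	 * device before the queue is used again.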
*/ 2248 memset(ring->desc, 0, ring->desc_dma.size); 2249 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2250 BUS_DMASYNC_PREWRITE); 2251 sc->qfullmsk &= ~(1 << ring->qid); 2252 sc->qenablemsk &= ~(1 << ring->qid); 2253 for (i = 0; i < nitems(sc->aggqid); i++) { 2254 if (sc->aggqid[i] == ring->qid) { 2255 sc->aggqid[i] = 0; 2256 break; 2257 } 2258 } 2259 ring->queued = 0; 2260 ring->cur = 0; 2261 ring->cur_hw = 0; 2262 ring->tail = 0; 2263 ring->tail_hw = 0; 2264 ring->tid = 0; 2265 } 2266 2267 static void 2268 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring) 2269 { 2270 int i; 2271 2272 iwx_dma_contig_free(&ring->desc_dma); 2273 iwx_dma_contig_free(&ring->cmd_dma); 2274 iwx_dma_contig_free(&ring->bc_tbl); 2275 2276 for (i = 0; i < IWX_TX_RING_COUNT; i++) { 2277 struct iwx_tx_data *data = &ring->data[i]; 2278 2279 if (data->m != NULL) { 2280 bus_dmamap_sync(ring->data_dmat, data->map, 2281 BUS_DMASYNC_POSTWRITE); 2282 bus_dmamap_unload(ring->data_dmat, data->map); 2283 m_freem(data->m); 2284 data->m = NULL; 2285 } 2286 if (data->map != NULL) { 2287 bus_dmamap_destroy(ring->data_dmat, data->map); 2288 data->map = NULL; 2289 } 2290 } 2291 if (ring->data_dmat != NULL) { 2292 bus_dma_tag_destroy(ring->data_dmat); 2293 ring->data_dmat = NULL; 2294 } 2295 } 2296 2297 static void 2298 iwx_enable_rfkill_int(struct iwx_softc *sc) 2299 { 2300 if (!sc->sc_msix) { 2301 sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL; 2302 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2303 } else { 2304 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2305 sc->sc_fh_init_mask); 2306 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2307 ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL); 2308 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL; 2309 } 2310 2311 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, 2312 IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN); 2313 } 2314 2315 static int 2316 iwx_check_rfkill(struct iwx_softc *sc) 2317 { 2318 uint32_t v; 2319 int rv; 2320 2321 /* 2322 * "documentation" is not really helpful here: 2323 * 27: HW_RF_KILL_SW 2324 * Indicates state of (platform's) hardware RF-Kill switch 2325 * 2326 * But apparently when it's off, it's on ... 2327 */ 2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL); 2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0; 2330 if (rv) { 2331 sc->sc_flags |= IWX_FLAG_RFKILL; 2332 } else { 2333 sc->sc_flags &= ~IWX_FLAG_RFKILL; 2334 } 2335 2336 return rv; 2337 } 2338 2339 static void 2340 iwx_enable_interrupts(struct iwx_softc *sc) 2341 { 2342 if (!sc->sc_msix) { 2343 sc->sc_intmask = IWX_CSR_INI_SET_MASK; 2344 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2345 } else { 2346 /* 2347 * fh/hw_mask keeps all the unmasked causes. 2348 * Unlike msi, in msix cause is enabled when it is unset. 2349 */ 2350 sc->sc_hw_mask = sc->sc_hw_init_mask; 2351 sc->sc_fh_mask = sc->sc_fh_init_mask; 2352 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2353 ~sc->sc_fh_mask); 2354 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2355 ~sc->sc_hw_mask); 2356 } 2357 } 2358 2359 static void 2360 iwx_enable_fwload_interrupt(struct iwx_softc *sc) 2361 { 2362 if (!sc->sc_msix) { 2363 sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX; 2364 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2365 } else { 2366 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2367 ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE); 2368 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE; 2369 /* 2370 * Leave all the FH causes enabled to get the ALIVE 2371 * notification. 
2372 */ 2373 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2374 ~sc->sc_fh_init_mask); 2375 sc->sc_fh_mask = sc->sc_fh_init_mask; 2376 } 2377 } 2378 2379 #if 0 2380 static void 2381 iwx_restore_interrupts(struct iwx_softc *sc) 2382 { 2383 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2384 } 2385 #endif 2386 2387 static void 2388 iwx_disable_interrupts(struct iwx_softc *sc) 2389 { 2390 if (!sc->sc_msix) { 2391 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0); 2392 2393 /* acknowledge all interrupts */ 2394 IWX_WRITE(sc, IWX_CSR_INT, ~0); 2395 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0); 2396 } else { 2397 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2398 sc->sc_fh_init_mask); 2399 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2400 sc->sc_hw_init_mask); 2401 } 2402 } 2403 2404 static void 2405 iwx_ict_reset(struct iwx_softc *sc) 2406 { 2407 iwx_disable_interrupts(sc); 2408 2409 memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE); 2410 sc->ict_cur = 0; 2411 2412 /* Set physical address of ICT (4KB aligned). */ 2413 IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG, 2414 IWX_CSR_DRAM_INT_TBL_ENABLE 2415 | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK 2416 | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER 2417 | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT); 2418 2419 /* Switch to ICT interrupt mode in driver. */ 2420 sc->sc_flags |= IWX_FLAG_USE_ICT; 2421 2422 IWX_WRITE(sc, IWX_CSR_INT, ~0); 2423 iwx_enable_interrupts(sc); 2424 } 2425 2426 #define IWX_HW_READY_TIMEOUT 50 2427 static int 2428 iwx_set_hw_ready(struct iwx_softc *sc) 2429 { 2430 int ready; 2431 2432 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2433 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 2434 2435 ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG, 2436 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 2437 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 2438 IWX_HW_READY_TIMEOUT); 2439 if (ready) 2440 IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG, 2441 IWX_CSR_MBOX_SET_REG_OS_ALIVE); 2442 2443 DPRINTF(("%s: ready=%d\n", __func__, ready)); 2444 return ready; 2445 } 2446 #undef IWX_HW_READY_TIMEOUT 2447 2448 static int 2449 iwx_prepare_card_hw(struct iwx_softc *sc) 2450 { 2451 int t = 0; 2452 int ntries; 2453 2454 if (iwx_set_hw_ready(sc)) 2455 return 0; 2456 2457 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG, 2458 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED); 2459 DELAY(1000); 2460 2461 for (ntries = 0; ntries < 10; ntries++) { 2462 /* If HW is not ready, prepare the conditions to check again */ 2463 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2464 IWX_CSR_HW_IF_CONFIG_REG_PREPARE); 2465 2466 do { 2467 if (iwx_set_hw_ready(sc)) 2468 return 0; 2469 DELAY(200); 2470 t += 200; 2471 } while (t < 150000); 2472 DELAY(25000); 2473 } 2474 2475 return ETIMEDOUT; 2476 } 2477 2478 static int 2479 iwx_force_power_gating(struct iwx_softc *sc) 2480 { 2481 int err; 2482 2483 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG, 2484 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); 2485 if (err) 2486 return err; 2487 DELAY(20); 2488 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG, 2489 IWX_HPM_HIPM_GEN_CFG_CR_PG_EN | 2490 IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN); 2491 if (err) 2492 return err; 2493 DELAY(20); 2494 err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG, 2495 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); 2496 return err; 2497 } 2498 2499 static void 2500 iwx_apm_config(struct iwx_softc *sc) 2501 { 2502 uint16_t lctl, cap; 2503 int pcie_ptr; 2504 int error; 2505 2506 /* 2507 * L0S states have been found to be unstable with our devices 2508 * and in newer hardware they are not officially supported at 2509 * all, so we must always set the L0S_DISABLED bit. 
2510 */ 2511 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED); 2512 2513 error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr); 2514 if (error != 0) { 2515 printf("can't fill pcie_ptr\n"); 2516 return; 2517 } 2518 2519 lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL, 2520 sizeof(lctl)); 2521 #define PCI_PCIE_LCSR_ASPM_L0S 0x00000001 2522 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S); 2523 #define PCI_PCIE_DCSR2 0x28 2524 cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2, 2525 sizeof(lctl)); 2526 #define PCI_PCIE_DCSR2_LTREN 0x00000400 2527 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0; 2528 #define PCI_PCIE_LCSR_ASPM_L1 0x00000002 2529 DPRINTF(("%s: L1 %sabled - LTR %sabled\n", 2530 DEVNAME(sc), 2531 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis", 2532 sc->sc_ltr_enabled ? "En" : "Dis")); 2533 #undef PCI_PCIE_LCSR_ASPM_L0S 2534 #undef PCI_PCIE_DCSR2 2535 #undef PCI_PCIE_DCSR2_LTREN 2536 #undef PCI_PCIE_LCSR_ASPM_L1 2537 } 2538 2539 /* 2540 * Start up NIC's basic functionality after it has been reset 2541 * e.g. after platform boot or shutdown. 2542 * NOTE: This does not load uCode nor start the embedded processor 2543 */ 2544 static int 2545 iwx_apm_init(struct iwx_softc *sc) 2546 { 2547 int err = 0; 2548 2549 /* 2550 * Disable L0s without affecting L1; 2551 * don't wait for ICH L0s (ICH bug W/A) 2552 */ 2553 IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS, 2554 IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 2555 2556 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 2557 IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL); 2558 2559 /* 2560 * Enable HAP INTA (interrupt from management bus) to 2561 * wake device's PCI Express link L1a -> L0s 2562 */ 2563 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2564 IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 2565 2566 iwx_apm_config(sc); 2567 2568 /* 2569 * Set "initialization complete" bit to move adapter from 2570 * D0U* --> D0A* (powered-up active) state. 2571 */ 2572 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2573 2574 /* 2575 * Wait for clock stabilization; once stabilized, access to 2576 * device-internal resources is supported, e.g. iwx_write_prph() 2577 * and accesses to uCode SRAM. 
2578 */ 2579 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL, 2580 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2581 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) { 2582 printf("%s: timeout waiting for clock stabilization\n", 2583 DEVNAME(sc)); 2584 err = ETIMEDOUT; 2585 goto out; 2586 } 2587 out: 2588 if (err) 2589 printf("%s: apm init error %d\n", DEVNAME(sc), err); 2590 return err; 2591 } 2592 2593 static void 2594 iwx_apm_stop(struct iwx_softc *sc) 2595 { 2596 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG, 2597 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED); 2598 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2599 IWX_CSR_HW_IF_CONFIG_REG_PREPARE | 2600 IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME); 2601 DELAY(1000); 2602 IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG, 2603 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED); 2604 DELAY(5000); 2605 2606 /* stop device's busmaster DMA activity */ 2607 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER); 2608 2609 if (!iwx_poll_bit(sc, IWX_CSR_RESET, 2610 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 2611 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100)) 2612 printf("%s: timeout waiting for bus master\n", DEVNAME(sc)); 2613 2614 /* 2615 * Clear "initialization complete" bit to move adapter from 2616 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 2617 */ 2618 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL, 2619 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2620 } 2621 2622 static void 2623 iwx_init_msix_hw(struct iwx_softc *sc) 2624 { 2625 iwx_conf_msix_hw(sc, 0); 2626 2627 if (!sc->sc_msix) 2628 return; 2629 2630 sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD); 2631 sc->sc_fh_mask = sc->sc_fh_init_mask; 2632 sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD); 2633 sc->sc_hw_mask = sc->sc_hw_init_mask; 2634 } 2635 2636 static void 2637 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped) 2638 { 2639 int vector = 0; 2640 2641 if (!sc->sc_msix) { 2642 /* Newer chips default to MSIX. 
*/ 2643 if (!stopped && iwx_nic_lock(sc)) { 2644 iwx_write_umac_prph(sc, IWX_UREG_CHICK, 2645 IWX_UREG_CHICK_MSI_ENABLE); 2646 iwx_nic_unlock(sc); 2647 } 2648 return; 2649 } 2650 2651 if (!stopped && iwx_nic_lock(sc)) { 2652 iwx_write_umac_prph(sc, IWX_UREG_CHICK, 2653 IWX_UREG_CHICK_MSIX_ENABLE); 2654 iwx_nic_unlock(sc); 2655 } 2656 2657 /* Disable all interrupts */ 2658 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0); 2659 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0); 2660 2661 /* Map fallback-queue (command/mgmt) to a single vector */ 2662 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0), 2663 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2664 /* Map RSS queue (data) to the same vector */ 2665 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1), 2666 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2667 2668 /* Enable the RX queues cause interrupts */ 2669 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2670 IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1); 2671 2672 /* Map non-RX causes to the same vector */ 2673 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM), 2674 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2675 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM), 2676 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2677 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D), 2678 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2679 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR), 2680 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2681 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE), 2682 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2683 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP), 2684 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2685 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE), 2686 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2687 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL), 2688 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2689 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL), 2690 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2691 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC), 2692 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2693 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR), 2694 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2695 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD), 2696 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2697 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX), 2698 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2699 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR), 2700 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2701 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP), 2702 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2703 2704 /* Enable non-RX causes interrupts */ 2705 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2706 IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | 2707 IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | 2708 IWX_MSIX_FH_INT_CAUSES_S2D | 2709 IWX_MSIX_FH_INT_CAUSES_FH_ERR); 2710 IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2711 IWX_MSIX_HW_INT_CAUSES_REG_ALIVE | 2712 IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP | 2713 IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE | 2714 IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL | 2715 IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL | 2716 IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC | 2717 IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR | 2718 IWX_MSIX_HW_INT_CAUSES_REG_SCD | 2719 IWX_MSIX_HW_INT_CAUSES_REG_FH_TX | 2720 IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR | 2721 IWX_MSIX_HW_INT_CAUSES_REG_HAP); 2722 } 2723 2724 static int 2725 
iwx_clear_persistence_bit(struct iwx_softc *sc) 2726 { 2727 uint32_t hpm, wprot; 2728 2729 hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG); 2730 if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) { 2731 wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000); 2732 if (wprot & IWX_PREG_WFPM_ACCESS) { 2733 printf("%s: cannot clear persistence bit\n", 2734 DEVNAME(sc)); 2735 return EPERM; 2736 } 2737 iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG, 2738 hpm & ~IWX_PERSISTENCE_BIT); 2739 } 2740 2741 return 0; 2742 } 2743 2744 static int 2745 iwx_start_hw(struct iwx_softc *sc) 2746 { 2747 int err; 2748 2749 err = iwx_prepare_card_hw(sc); 2750 if (err) 2751 return err; 2752 2753 if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) { 2754 err = iwx_clear_persistence_bit(sc); 2755 if (err) 2756 return err; 2757 } 2758 2759 /* Reset the entire device */ 2760 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET); 2761 DELAY(5000); 2762 2763 if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 && 2764 sc->sc_integrated) { 2765 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, 2766 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2767 DELAY(20); 2768 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL, 2769 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2770 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) { 2771 printf("%s: timeout waiting for clock stabilization\n", 2772 DEVNAME(sc)); 2773 return ETIMEDOUT; 2774 } 2775 2776 err = iwx_force_power_gating(sc); 2777 if (err) 2778 return err; 2779 2780 /* Reset the entire device */ 2781 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET); 2782 DELAY(5000); 2783 } 2784 2785 err = iwx_apm_init(sc); 2786 if (err) 2787 return err; 2788 2789 iwx_init_msix_hw(sc); 2790 2791 iwx_enable_rfkill_int(sc); 2792 iwx_check_rfkill(sc); 2793 2794 return 0; 2795 } 2796 2797 static void 2798 iwx_stop_device(struct iwx_softc *sc) 2799 { 2800 int i; 2801 2802 iwx_disable_interrupts(sc); 2803 sc->sc_flags &= ~IWX_FLAG_USE_ICT; 2804 2805 iwx_disable_rx_dma(sc); 2806 iwx_reset_rx_ring(sc, &sc->rxq); 2807 for (i = 0; i < nitems(sc->txq); i++) 2808 iwx_reset_tx_ring(sc, &sc->txq[i]); 2809 #if 0 2810 /* XXX-THJ: Tidy up BA state on stop */ 2811 for (i = 0; i < IEEE80211_NUM_TID; i++) { 2812 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i]; 2813 if (ba->ba_state != IEEE80211_BA_AGREED) 2814 continue; 2815 ieee80211_delba_request(ic, ni, 0, 1, i); 2816 } 2817 #endif 2818 /* Make sure (redundant) we've released our request to stay awake */ 2819 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL, 2820 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2821 if (sc->sc_nic_locks > 0) 2822 printf("%s: %d active NIC locks forcefully cleared\n", 2823 DEVNAME(sc), sc->sc_nic_locks); 2824 sc->sc_nic_locks = 0; 2825 2826 /* Stop the device, and put it in low power state */ 2827 iwx_apm_stop(sc); 2828 2829 /* Reset the on-board processor. */ 2830 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET); 2831 DELAY(5000); 2832 2833 /* 2834 * Upon stop, the IVAR table gets erased, so msi-x won't 2835 * work. This causes a bug in RF-KILL flows, since the interrupt 2836 * that enables radio won't fire on the correct irq, and the 2837 * driver won't be able to handle the interrupt. 2838 * Configure the IVAR table again after reset. 2839 */ 2840 iwx_conf_msix_hw(sc, 1); 2841 2842 /* 2843 * Upon stop, the APM issues an interrupt if HW RF kill is set. 2844 * Clear the interrupt again. 2845 */ 2846 iwx_disable_interrupts(sc); 2847 2848 /* Even though we stop the HW we still want the RF kill interrupt. 
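	 * This lets us keep tracking the rfkill switch state while the
	 * device is stopped.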
	 */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}

static void
iwx_nic_config(struct iwx_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
	    IWX_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
}

static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}

static int
iwx_nic_init(struct iwx_softc *sc)
{
	int err;

	err = iwx_apm_init(sc);
	if (err)
		return err;
	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
		iwx_nic_config(sc);

	err = iwx_nic_rx_init(sc);
	if (err)
		return err;

	/* Enable MAC shadow registers. */
	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

/* Map ieee80211_edca_ac categories to firmware Tx FIFO.
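 * Entries are indexed by WME access category: BE, BK, VI, VO.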
*/ 2926 const uint8_t iwx_ac_to_tx_fifo[] = { 2927 IWX_GEN2_EDCA_TX_FIFO_BE, 2928 IWX_GEN2_EDCA_TX_FIFO_BK, 2929 IWX_GEN2_EDCA_TX_FIFO_VI, 2930 IWX_GEN2_EDCA_TX_FIFO_VO, 2931 }; 2932 2933 static int 2934 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid, 2935 int num_slots) 2936 { 2937 struct iwx_rx_packet *pkt; 2938 struct iwx_tx_queue_cfg_rsp *resp; 2939 struct iwx_tx_queue_cfg_cmd cmd_v0; 2940 struct iwx_scd_queue_cfg_cmd cmd_v3; 2941 struct iwx_host_cmd hcmd = { 2942 .flags = IWX_CMD_WANT_RESP, 2943 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp), 2944 }; 2945 struct iwx_tx_ring *ring = &sc->txq[qid]; 2946 int err, fwqid, cmd_ver; 2947 uint32_t wr_idx; 2948 size_t resp_len; 2949 2950 DPRINTF(("%s: tid=%i\n", __func__, tid)); 2951 DPRINTF(("%s: qid=%i\n", __func__, qid)); 2952 iwx_reset_tx_ring(sc, ring); 2953 2954 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 2955 IWX_SCD_QUEUE_CONFIG_CMD); 2956 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) { 2957 memset(&cmd_v0, 0, sizeof(cmd_v0)); 2958 cmd_v0.sta_id = sta_id; 2959 cmd_v0.tid = tid; 2960 cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE); 2961 cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots)); 2962 cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr); 2963 cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr); 2964 hcmd.id = IWX_SCD_QUEUE_CFG; 2965 hcmd.data[0] = &cmd_v0; 2966 hcmd.len[0] = sizeof(cmd_v0); 2967 } else if (cmd_ver == 3) { 2968 memset(&cmd_v3, 0, sizeof(cmd_v3)); 2969 cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD); 2970 cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr); 2971 cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr); 2972 cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots)); 2973 cmd_v3.u.add.flags = htole32(0); 2974 cmd_v3.u.add.sta_mask = htole32(1 << sta_id); 2975 cmd_v3.u.add.tid = tid; 2976 hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 2977 IWX_SCD_QUEUE_CONFIG_CMD); 2978 hcmd.data[0] = &cmd_v3; 2979 hcmd.len[0] = sizeof(cmd_v3); 2980 } else { 2981 printf("%s: unsupported SCD_QUEUE_CFG command version %d\n", 2982 DEVNAME(sc), cmd_ver); 2983 return ENOTSUP; 2984 } 2985 2986 err = iwx_send_cmd(sc, &hcmd); 2987 if (err) 2988 return err; 2989 2990 pkt = hcmd.resp_pkt; 2991 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) { 2992 err = EIO; 2993 goto out; 2994 } 2995 2996 resp_len = iwx_rx_packet_payload_len(pkt); 2997 if (resp_len != sizeof(*resp)) { 2998 err = EIO; 2999 goto out; 3000 } 3001 3002 resp = (void *)pkt->data; 3003 fwqid = le16toh(resp->queue_number); 3004 wr_idx = le16toh(resp->write_pointer); 3005 3006 /* Unlike iwlwifi, we do not support dynamic queue ID assignment. 
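	 * The queue number echoed by firmware must therefore match the
	 * requested one, and its write pointer must match our ring index.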
*/ 3007 if (fwqid != qid) { 3008 DPRINTF(("%s: === fwqid != qid\n", __func__)); 3009 err = EIO; 3010 goto out; 3011 } 3012 3013 if (wr_idx != ring->cur_hw) { 3014 DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__)); 3015 err = EIO; 3016 goto out; 3017 } 3018 3019 sc->qenablemsk |= (1 << qid); 3020 ring->tid = tid; 3021 out: 3022 iwx_free_resp(sc, &hcmd); 3023 return err; 3024 } 3025 3026 static int 3027 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid) 3028 { 3029 struct iwx_rx_packet *pkt; 3030 struct iwx_tx_queue_cfg_rsp *resp; 3031 struct iwx_tx_queue_cfg_cmd cmd_v0; 3032 struct iwx_scd_queue_cfg_cmd cmd_v3; 3033 struct iwx_host_cmd hcmd = { 3034 .flags = IWX_CMD_WANT_RESP, 3035 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp), 3036 }; 3037 struct iwx_tx_ring *ring = &sc->txq[qid]; 3038 int err, cmd_ver; 3039 3040 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 3041 IWX_SCD_QUEUE_CONFIG_CMD); 3042 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) { 3043 memset(&cmd_v0, 0, sizeof(cmd_v0)); 3044 cmd_v0.sta_id = sta_id; 3045 cmd_v0.tid = tid; 3046 cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */ 3047 cmd_v0.cb_size = htole32(0); 3048 cmd_v0.byte_cnt_addr = htole64(0); 3049 cmd_v0.tfdq_addr = htole64(0); 3050 hcmd.id = IWX_SCD_QUEUE_CFG; 3051 hcmd.data[0] = &cmd_v0; 3052 hcmd.len[0] = sizeof(cmd_v0); 3053 } else if (cmd_ver == 3) { 3054 memset(&cmd_v3, 0, sizeof(cmd_v3)); 3055 cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE); 3056 cmd_v3.u.remove.sta_mask = htole32(1 << sta_id); 3057 cmd_v3.u.remove.tid = tid; 3058 hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 3059 IWX_SCD_QUEUE_CONFIG_CMD); 3060 hcmd.data[0] = &cmd_v3; 3061 hcmd.len[0] = sizeof(cmd_v3); 3062 } else { 3063 printf("%s: unsupported SCD_QUEUE_CFG command version %d\n", 3064 DEVNAME(sc), cmd_ver); 3065 return ENOTSUP; 3066 } 3067 3068 err = iwx_send_cmd(sc, &hcmd); 3069 if (err) 3070 return err; 3071 3072 pkt = hcmd.resp_pkt; 3073 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) { 3074 err = EIO; 3075 goto out; 3076 } 3077 3078 sc->qenablemsk &= ~(1 << qid); 3079 iwx_reset_tx_ring(sc, ring); 3080 out: 3081 iwx_free_resp(sc, &hcmd); 3082 return err; 3083 } 3084 3085 static void 3086 iwx_post_alive(struct iwx_softc *sc) 3087 { 3088 int txcmd_ver; 3089 3090 iwx_ict_reset(sc); 3091 3092 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ; 3093 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6) 3094 sc->sc_rate_n_flags_version = 2; 3095 else 3096 sc->sc_rate_n_flags_version = 1; 3097 3098 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD); 3099 } 3100 3101 static int 3102 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in, 3103 uint32_t duration_tu) 3104 { 3105 3106 struct iwx_session_prot_cmd cmd = { 3107 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 3108 in->in_color)), 3109 .action = htole32(IWX_FW_CTXT_ACTION_ADD), 3110 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC), 3111 .duration_tu = htole32(duration_tu), 3112 }; 3113 uint32_t cmd_id; 3114 int err; 3115 3116 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0); 3117 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd); 3118 if (!err) 3119 sc->sc_flags |= IWX_FLAG_TE_ACTIVE; 3120 return err; 3121 } 3122 3123 static void 3124 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in) 3125 { 3126 struct iwx_session_prot_cmd cmd = { 3127 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 3128 in->in_color)), 3129 .action = 
htole32(IWX_FW_CTXT_ACTION_REMOVE), 3130 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC), 3131 .duration_tu = 0, 3132 }; 3133 uint32_t cmd_id; 3134 3135 /* Do nothing if the time event has already ended. */ 3136 if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0) 3137 return; 3138 3139 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0); 3140 if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0) 3141 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE; 3142 } 3143 3144 /* 3145 * NVM read access and content parsing. We do not support 3146 * external NVM or writing NVM. 3147 */ 3148 3149 static uint8_t 3150 iwx_fw_valid_tx_ant(struct iwx_softc *sc) 3151 { 3152 uint8_t tx_ant; 3153 3154 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN) 3155 >> IWX_FW_PHY_CFG_TX_CHAIN_POS); 3156 3157 if (sc->sc_nvm.valid_tx_ant) 3158 tx_ant &= sc->sc_nvm.valid_tx_ant; 3159 3160 return tx_ant; 3161 } 3162 3163 static uint8_t 3164 iwx_fw_valid_rx_ant(struct iwx_softc *sc) 3165 { 3166 uint8_t rx_ant; 3167 3168 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN) 3169 >> IWX_FW_PHY_CFG_RX_CHAIN_POS); 3170 3171 if (sc->sc_nvm.valid_rx_ant) 3172 rx_ant &= sc->sc_nvm.valid_rx_ant; 3173 3174 return rx_ant; 3175 } 3176 3177 static void 3178 iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans, 3179 struct ieee80211_channel chans[]) 3180 { 3181 struct iwx_softc *sc = ic->ic_softc; 3182 struct iwx_nvm_data *data = &sc->sc_nvm; 3183 uint8_t bands[IEEE80211_MODE_BYTES]; 3184 const uint8_t *nvm_channels; 3185 uint32_t ch_flags; 3186 int ch_idx, nchan; 3187 3188 if (sc->sc_uhb_supported) { 3189 nchan = nitems(iwx_nvm_channels_uhb); 3190 nvm_channels = iwx_nvm_channels_uhb; 3191 } else { 3192 nchan = nitems(iwx_nvm_channels_8000); 3193 nvm_channels = iwx_nvm_channels_8000; 3194 } 3195 3196 /* 2.4Ghz; 1-13: 11b/g channels. 
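	 * Channel flags are taken from the regulatory profile in the
	 * firmware's NVM_GET_INFO response.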
*/ 3197 if (!data->sku_cap_band_24GHz_enable) 3198 goto band_5; 3199 3200 memset(bands, 0, sizeof(bands)); 3201 setbit(bands, IEEE80211_MODE_11B); 3202 setbit(bands, IEEE80211_MODE_11G); 3203 setbit(bands, IEEE80211_MODE_11NG); 3204 for (ch_idx = 0; 3205 ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan; 3206 ch_idx++) { 3207 3208 uint32_t nflags = 0; 3209 int cflags = 0; 3210 3211 if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) { 3212 ch_flags = le32_to_cpup( 3213 sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx); 3214 } else { 3215 ch_flags = le16_to_cpup( 3216 sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx); 3217 } 3218 if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0) 3219 continue; 3220 3221 if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0) 3222 cflags |= NET80211_CBW_FLAG_HT40; 3223 3224 /* XXX-BZ nflags RADAR/DFS/INDOOR */ 3225 3226 /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans, 3227 nvm_channels[ch_idx], 3228 ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B), 3229 /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22, 3230 nflags, bands, cflags); 3231 } 3232 3233 band_5: 3234 /* 5Ghz */ 3235 if (!data->sku_cap_band_52GHz_enable) 3236 goto band_6; 3237 3238 3239 memset(bands, 0, sizeof(bands)); 3240 setbit(bands, IEEE80211_MODE_11A); 3241 setbit(bands, IEEE80211_MODE_11NA); 3242 setbit(bands, IEEE80211_MODE_VHT_5GHZ); 3243 3244 for (ch_idx = IWX_NUM_2GHZ_CHANNELS; 3245 ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan; 3246 ch_idx++) { 3247 uint32_t nflags = 0; 3248 int cflags = 0; 3249 3250 if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) 3251 ch_flags = le32_to_cpup( 3252 sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx); 3253 else 3254 ch_flags = le16_to_cpup( 3255 sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx); 3256 3257 if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0) 3258 continue; 3259 3260 if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0) 3261 cflags |= NET80211_CBW_FLAG_HT40; 3262 if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0) 3263 cflags |= NET80211_CBW_FLAG_VHT80; 3264 if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0) 3265 cflags |= NET80211_CBW_FLAG_VHT160; 3266 3267 /* XXX-BZ nflags RADAR/DFS/INDOOR */ 3268 3269 /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans, 3270 nvm_channels[ch_idx], 3271 ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A), 3272 /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22, 3273 nflags, bands, cflags); 3274 } 3275 band_6: 3276 /* 6GHz one day ... 
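	 * (UHB channel profiles are present in the NVM tables but are not
	 * yet exposed to net80211.)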
*/ 3277 return; 3278 } 3279 3280 static int 3281 iwx_mimo_enabled(struct iwx_softc *sc) 3282 { 3283 3284 return !sc->sc_nvm.sku_cap_mimo_disable; 3285 } 3286 3287 static void 3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf, 3289 uint16_t ssn, uint16_t buf_size) 3290 { 3291 reorder_buf->head_sn = ssn; 3292 reorder_buf->num_stored = 0; 3293 reorder_buf->buf_size = buf_size; 3294 reorder_buf->last_amsdu = 0; 3295 reorder_buf->last_sub_index = 0; 3296 reorder_buf->removed = 0; 3297 reorder_buf->valid = 0; 3298 reorder_buf->consec_oldsn_drops = 0; 3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0; 3300 reorder_buf->consec_oldsn_prev_drop = 0; 3301 } 3302 3303 static void 3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba) 3305 { 3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf; 3307 3308 reorder_buf->removed = 1; 3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID; 3310 } 3311 3312 #define IWX_MAX_RX_BA_SESSIONS 16 3313 3314 static struct iwx_rxba_data * 3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid) 3316 { 3317 int i; 3318 3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) { 3320 if (sc->sc_rxba_data[i].baid == 3321 IWX_RX_REORDER_DATA_INVALID_BAID) 3322 continue; 3323 if (sc->sc_rxba_data[i].tid == tid) 3324 return &sc->sc_rxba_data[i]; 3325 } 3326 3327 return NULL; 3328 } 3329 3330 static int 3331 iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni, 3332 uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start, 3333 uint8_t *baid) 3334 { 3335 struct iwx_rx_baid_cfg_cmd cmd; 3336 uint32_t new_baid = 0; 3337 int err; 3338 3339 IWX_ASSERT_LOCKED(sc); 3340 3341 memset(&cmd, 0, sizeof(cmd)); 3342 3343 if (start) { 3344 cmd.action = IWX_RX_BAID_ACTION_ADD; 3345 cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID); 3346 cmd.alloc.tid = tid; 3347 cmd.alloc.ssn = htole16(ssn); 3348 cmd.alloc.win_size = htole16(winsize); 3349 } else { 3350 struct iwx_rxba_data *rxba; 3351 3352 rxba = iwx_find_rxba_data(sc, tid); 3353 if (rxba == NULL) 3354 return ENOENT; 3355 *baid = rxba->baid; 3356 3357 cmd.action = IWX_RX_BAID_ACTION_REMOVE; 3358 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 3359 IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) { 3360 cmd.remove_v1.baid = rxba->baid; 3361 } else { 3362 cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID); 3363 cmd.remove.tid = tid; 3364 } 3365 } 3366 3367 err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 3368 IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid); 3369 if (err) 3370 return err; 3371 3372 if (start) { 3373 if (new_baid >= nitems(sc->sc_rxba_data)) 3374 return ERANGE; 3375 *baid = new_baid; 3376 } 3377 3378 return 0; 3379 } 3380 3381 static void 3382 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid, 3383 uint16_t ssn, uint16_t winsize, int timeout_val, int start) 3384 { 3385 int err; 3386 struct iwx_rxba_data *rxba = NULL; 3387 uint8_t baid = 0; 3388 3389 if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) { 3390 return; 3391 } 3392 3393 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) { 3394 err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize, 3395 timeout_val, start, &baid); 3396 } else { 3397 panic("sta_rx_agg unsupported hw"); 3398 } 3399 if (err) { 3400 DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err)); 3401 return; 3402 } else 3403 DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__)); 3404 3405 rxba = &sc->sc_rxba_data[baid]; 3406 3407 /* 
Deaggregation is done in hardware. */ 3408 if (start) { 3409 if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) { 3410 return; 3411 } 3412 rxba->sta_id = IWX_STATION_ID; 3413 rxba->tid = tid; 3414 rxba->baid = baid; 3415 rxba->timeout = timeout_val; 3416 getmicrouptime(&rxba->last_rx); 3417 iwx_init_reorder_buffer(&rxba->reorder_buf, ssn, 3418 winsize); 3419 if (timeout_val != 0) { 3420 DPRINTF(("%s: timeout_val != 0\n", __func__)); 3421 return; 3422 } 3423 } else 3424 iwx_clear_reorder_buffer(sc, rxba); 3425 3426 if (start) { 3427 sc->sc_rx_ba_sessions++; 3428 } else if (sc->sc_rx_ba_sessions > 0) 3429 sc->sc_rx_ba_sessions--; 3430 } 3431 3432 static void 3433 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni, 3434 uint8_t tid) 3435 { 3436 int err, qid; 3437 3438 qid = sc->aggqid[tid]; 3439 if (qid == 0) { 3440 /* Firmware should pick the next unused Tx queue. */ 3441 qid = fls(sc->qenablemsk); 3442 } 3443 3444 DPRINTF(("%s: qid=%i\n", __func__, qid)); 3445 3446 /* 3447 * Simply enable the queue. 3448 * Firmware handles Tx Ba session setup and teardown. 3449 */ 3450 if ((sc->qenablemsk & (1 << qid)) == 0) { 3451 if (!iwx_nic_lock(sc)) { 3452 return; 3453 } 3454 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid, 3455 IWX_TX_RING_COUNT); 3456 iwx_nic_unlock(sc); 3457 if (err) { 3458 printf("%s: could not enable Tx queue %d " 3459 "(error %d)\n", DEVNAME(sc), qid, err); 3460 return; 3461 } 3462 } 3463 ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING; 3464 DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid)); 3465 sc->aggqid[tid] = qid; 3466 } 3467 3468 static void 3469 iwx_ba_rx_task(void *arg, int npending __unused) 3470 { 3471 struct iwx_softc *sc = arg; 3472 struct ieee80211com *ic = &sc->sc_ic; 3473 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3474 struct ieee80211_node *ni = vap->iv_bss; 3475 int tid; 3476 3477 IWX_LOCK(sc); 3478 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) { 3479 if (sc->sc_flags & IWX_FLAG_SHUTDOWN) 3480 break; 3481 if (sc->ba_rx.start_tidmask & (1 << tid)) { 3482 struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid]; 3483 DPRINTF(("%s: ba->ba_flags=%x\n", __func__, 3484 ba->ba_flags)); 3485 if (ba->ba_flags == IWX_BA_DONE) { 3486 DPRINTF(("%s: ampdu for tid %i already added\n", 3487 __func__, tid)); 3488 break; 3489 } 3490 3491 DPRINTF(("%s: ampdu rx start for tid %i\n", __func__, 3492 tid)); 3493 iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart, 3494 ba->ba_winsize, ba->ba_timeout_val, 1); 3495 sc->ba_rx.start_tidmask &= ~(1 << tid); 3496 ba->ba_flags = IWX_BA_DONE; 3497 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) { 3498 iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0); 3499 sc->ba_rx.stop_tidmask &= ~(1 << tid); 3500 } 3501 } 3502 IWX_UNLOCK(sc); 3503 } 3504 3505 static void 3506 iwx_ba_tx_task(void *arg, int npending __unused) 3507 { 3508 struct iwx_softc *sc = arg; 3509 struct ieee80211com *ic = &sc->sc_ic; 3510 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3511 struct ieee80211_node *ni = vap->iv_bss; 3512 int tid; 3513 3514 IWX_LOCK(sc); 3515 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) { 3516 if (sc->sc_flags & IWX_FLAG_SHUTDOWN) 3517 break; 3518 if (sc->ba_tx.start_tidmask & (1 << tid)) { 3519 DPRINTF(("%s: ampdu tx start for tid %i\n", __func__, 3520 tid)); 3521 iwx_sta_tx_agg_start(sc, ni, tid); 3522 sc->ba_tx.start_tidmask &= ~(1 << tid); 3523 sc->sc_flags |= IWX_FLAG_AMPDUTX; 3524 } 3525 } 3526 3527 IWX_UNLOCK(sc); 3528 } 3529 3530 static void 3531 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data 
*data) 3532 { 3533 uint32_t mac_addr0, mac_addr1; 3534 3535 memset(data->hw_addr, 0, sizeof(data->hw_addr)); 3536 3537 if (!iwx_nic_lock(sc)) 3538 return; 3539 3540 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc))); 3541 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc))); 3542 3543 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); 3544 3545 /* If OEM fused a valid address, use it instead of the one in OTP. */ 3546 if (iwx_is_valid_mac_addr(data->hw_addr)) { 3547 iwx_nic_unlock(sc); 3548 return; 3549 } 3550 3551 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc))); 3552 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc))); 3553 3554 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); 3555 3556 iwx_nic_unlock(sc); 3557 } 3558 3559 static int 3560 iwx_is_valid_mac_addr(const uint8_t *addr) 3561 { 3562 static const uint8_t reserved_mac[] = { 3563 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 3564 }; 3565 3566 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 && 3567 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 && 3568 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 && 3569 !ETHER_IS_MULTICAST(addr)); 3570 } 3571 3572 static void 3573 iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest) 3574 { 3575 const uint8_t *hw_addr; 3576 3577 hw_addr = (const uint8_t *)&mac_addr0; 3578 dest[0] = hw_addr[3]; 3579 dest[1] = hw_addr[2]; 3580 dest[2] = hw_addr[1]; 3581 dest[3] = hw_addr[0]; 3582 3583 hw_addr = (const uint8_t *)&mac_addr1; 3584 dest[4] = hw_addr[1]; 3585 dest[5] = hw_addr[0]; 3586 } 3587 3588 static int 3589 iwx_nvm_get(struct iwx_softc *sc) 3590 { 3591 struct iwx_nvm_get_info cmd = {}; 3592 struct iwx_nvm_data *nvm = &sc->sc_nvm; 3593 struct iwx_host_cmd hcmd = { 3594 .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL, 3595 .data = { &cmd, }, 3596 .len = { sizeof(cmd) }, 3597 .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP, 3598 IWX_NVM_GET_INFO) 3599 }; 3600 int err = 0; 3601 uint32_t mac_flags; 3602 /* 3603 * All the values in iwx_nvm_get_info_rsp v4 are the same as 3604 * in v3, except for the channel profile part of the 3605 * regulatory. So we can just access the new struct, with the 3606 * exception of the latter. 3607 */ 3608 struct iwx_nvm_get_info_rsp *rsp; 3609 struct iwx_nvm_get_info_rsp_v3 *rsp_v3; 3610 int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO); 3611 size_t resp_len = v4 ? 
sizeof(*rsp) : sizeof(*rsp_v3); 3612 3613 hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len; 3614 err = iwx_send_cmd(sc, &hcmd); 3615 if (err) { 3616 printf("%s: failed to send cmd (error %d)", __func__, err); 3617 return err; 3618 } 3619 3620 if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) { 3621 printf("%s: iwx_rx_packet_payload_len=%d\n", __func__, 3622 iwx_rx_packet_payload_len(hcmd.resp_pkt)); 3623 printf("%s: resp_len=%zu\n", __func__, resp_len); 3624 err = EIO; 3625 goto out; 3626 } 3627 3628 memset(nvm, 0, sizeof(*nvm)); 3629 3630 iwx_set_mac_addr_from_csr(sc, nvm); 3631 if (!iwx_is_valid_mac_addr(nvm->hw_addr)) { 3632 printf("%s: no valid mac address was found\n", DEVNAME(sc)); 3633 err = EINVAL; 3634 goto out; 3635 } 3636 3637 rsp = (void *)hcmd.resp_pkt->data; 3638 3639 /* Initialize general data */ 3640 nvm->nvm_version = le16toh(rsp->general.nvm_version); 3641 nvm->n_hw_addrs = rsp->general.n_hw_addrs; 3642 3643 /* Initialize MAC sku data */ 3644 mac_flags = le32toh(rsp->mac_sku.mac_sku_flags); 3645 nvm->sku_cap_11ac_enable = 3646 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); 3647 nvm->sku_cap_11n_enable = 3648 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED); 3649 nvm->sku_cap_11ax_enable = 3650 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); 3651 nvm->sku_cap_band_24GHz_enable = 3652 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); 3653 nvm->sku_cap_band_52GHz_enable = 3654 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); 3655 nvm->sku_cap_mimo_disable = 3656 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED); 3657 3658 /* Initialize PHY sku data */ 3659 nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains); 3660 nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains); 3661 3662 if (le32toh(rsp->regulatory.lar_enabled) && 3663 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) { 3664 nvm->lar_enabled = 1; 3665 } 3666 3667 memcpy(&sc->sc_rsp_info, rsp, resp_len); 3668 if (v4) { 3669 sc->sc_rsp_vers = IWX_FBSD_RSP_V4; 3670 } else { 3671 sc->sc_rsp_vers = IWX_FBSD_RSP_V3; 3672 } 3673 out: 3674 iwx_free_resp(sc, &hcmd); 3675 return err; 3676 } 3677 3678 static int 3679 iwx_load_firmware(struct iwx_softc *sc) 3680 { 3681 struct iwx_fw_sects *fws; 3682 int err; 3683 3684 IWX_ASSERT_LOCKED(sc) 3685 3686 sc->sc_uc.uc_intr = 0; 3687 sc->sc_uc.uc_ok = 0; 3688 3689 fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR]; 3690 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 3691 err = iwx_ctxt_info_gen3_init(sc, fws); 3692 else 3693 err = iwx_ctxt_info_init(sc, fws); 3694 if (err) { 3695 printf("%s: could not init context info\n", DEVNAME(sc)); 3696 return err; 3697 } 3698 3699 /* wait for the firmware to load */ 3700 err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz); 3701 if (err || !sc->sc_uc.uc_ok) { 3702 printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err); 3703 iwx_ctxt_info_free_paging(sc); 3704 } 3705 3706 iwx_dma_contig_free(&sc->iml_dma); 3707 iwx_ctxt_info_free_fw_img(sc); 3708 3709 if (!sc->sc_uc.uc_ok) 3710 return EINVAL; 3711 3712 return err; 3713 } 3714 3715 static int 3716 iwx_start_fw(struct iwx_softc *sc) 3717 { 3718 int err; 3719 3720 IWX_WRITE(sc, IWX_CSR_INT, ~0); 3721 3722 iwx_disable_interrupts(sc); 3723 3724 /* make sure rfkill handshake bits are cleared */ 3725 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL); 3726 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, 3727 IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3728 3729 /* clear (again), then enable firmware load 
interrupt */ 3730 IWX_WRITE(sc, IWX_CSR_INT, ~0); 3731 3732 err = iwx_nic_init(sc); 3733 if (err) { 3734 printf("%s: unable to init nic\n", DEVNAME(sc)); 3735 return err; 3736 } 3737 3738 iwx_enable_fwload_interrupt(sc); 3739 3740 return iwx_load_firmware(sc); 3741 } 3742 3743 static int 3744 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data, 3745 size_t len) 3746 { 3747 const struct iwx_ucode_tlv *tlv; 3748 uint32_t sha1 = 0; 3749 uint16_t mac_type = 0, rf_id = 0; 3750 uint8_t *pnvm_data = NULL, *tmp; 3751 int hw_match = 0; 3752 uint32_t size = 0; 3753 int err; 3754 3755 while (len >= sizeof(*tlv)) { 3756 uint32_t tlv_len, tlv_type; 3757 3758 len -= sizeof(*tlv); 3759 tlv = (const void *)data; 3760 3761 tlv_len = le32toh(tlv->length); 3762 tlv_type = le32toh(tlv->type); 3763 3764 if (len < tlv_len) { 3765 printf("%s: invalid TLV len: %zd/%u\n", 3766 DEVNAME(sc), len, tlv_len); 3767 err = EINVAL; 3768 goto out; 3769 } 3770 3771 data += sizeof(*tlv); 3772 3773 switch (tlv_type) { 3774 case IWX_UCODE_TLV_PNVM_VERSION: 3775 if (tlv_len < sizeof(uint32_t)) 3776 break; 3777 3778 sha1 = le32_to_cpup((const uint32_t *)data); 3779 break; 3780 case IWX_UCODE_TLV_HW_TYPE: 3781 if (tlv_len < 2 * sizeof(uint16_t)) 3782 break; 3783 3784 if (hw_match) 3785 break; 3786 3787 mac_type = le16_to_cpup((const uint16_t *)data); 3788 rf_id = le16_to_cpup((const uint16_t *)(data + 3789 sizeof(uint16_t))); 3790 3791 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) && 3792 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id)) 3793 hw_match = 1; 3794 break; 3795 case IWX_UCODE_TLV_SEC_RT: { 3796 const struct iwx_pnvm_section *section; 3797 uint32_t data_len; 3798 3799 section = (const void *)data; 3800 data_len = tlv_len - sizeof(*section); 3801 3802 /* TODO: remove, this is a deprecated separator */ 3803 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee) 3804 break; 3805 3806 tmp = malloc(size + data_len, M_DEVBUF, 3807 M_WAITOK | M_ZERO); 3808 if (tmp == NULL) { 3809 err = ENOMEM; 3810 goto out; 3811 } 3812 // XXX:misha pnvm_data is NULL and size is 0 at first pass 3813 memcpy(tmp, pnvm_data, size); 3814 memcpy(tmp + size, section->data, data_len); 3815 free(pnvm_data, M_DEVBUF); 3816 pnvm_data = tmp; 3817 size += data_len; 3818 break; 3819 } 3820 case IWX_UCODE_TLV_PNVM_SKU: 3821 /* New PNVM section started, stop parsing. 
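A PNVM image is a flat TLV stream in which each IWX_UCODE_TLV_PNVM_SKU TLV opens the section for one SKU, so hitting the next SKU marker means all IWX_UCODE_TLV_SEC_RT data gathered so far belongs to the section we were asked to parse.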
*/ 3822 goto done; 3823 default: 3824 break; 3825 } 3826 3827 if (roundup(tlv_len, 4) > len) 3828 break; 3829 len -= roundup(tlv_len, 4); 3830 data += roundup(tlv_len, 4); 3831 } 3832 done: 3833 if (!hw_match || size == 0) { 3834 err = ENOENT; 3835 goto out; 3836 } 3837 3838 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1); 3839 if (err) { 3840 printf("%s: could not allocate DMA memory for PNVM\n", 3841 DEVNAME(sc)); 3842 err = ENOMEM; 3843 goto out; 3844 } 3845 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size); 3846 iwx_ctxt_info_gen3_set_pnvm(sc); 3847 sc->sc_pnvm_ver = sha1; 3848 out: 3849 free(pnvm_data, M_DEVBUF); 3850 return err; 3851 } 3852 3853 static int 3854 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len) 3855 { 3856 const struct iwx_ucode_tlv *tlv; 3857 3858 while (len >= sizeof(*tlv)) { 3859 uint32_t tlv_len, tlv_type; 3860 3861 len -= sizeof(*tlv); 3862 tlv = (const void *)data; 3863 3864 tlv_len = le32toh(tlv->length); 3865 tlv_type = le32toh(tlv->type); 3866 3867 if (len < tlv_len || roundup(tlv_len, 4) > len) 3868 return EINVAL; 3869 3870 if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) { 3871 const struct iwx_sku_id *sku_id = 3872 (const void *)(data + sizeof(*tlv)); 3873 3874 data += sizeof(*tlv) + roundup(tlv_len, 4); 3875 len -= roundup(tlv_len, 4); 3876 3877 if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) && 3878 sc->sc_sku_id[1] == le32toh(sku_id->data[1]) && 3879 sc->sc_sku_id[2] == le32toh(sku_id->data[2]) && 3880 iwx_pnvm_handle_section(sc, data, len) == 0) 3881 return 0; 3882 } else { 3883 data += sizeof(*tlv) + roundup(tlv_len, 4); 3884 len -= roundup(tlv_len, 4); 3885 } 3886 } 3887 3888 return ENOENT; 3889 } 3890 3891 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */ 3892 static void 3893 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc) 3894 { 3895 struct iwx_prph_scratch *prph_scratch; 3896 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl; 3897 3898 prph_scratch = sc->prph_scratch_dma.vaddr; 3899 prph_sc_ctrl = &prph_scratch->ctrl_cfg; 3900 3901 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr); 3902 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size); 3903 3904 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE); 3905 } 3906 3907 /* 3908 * Load platform-NVM (non-volatile-memory) data from the filesystem. 3909 * This data apparently contains regulatory information and affects device 3910 * channel configuration. 3911 * The SKU of AX210 devices tells us which PNVM file section is needed. 3912 * Pre-AX210 devices store NVM data onboard. 
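The flow below: fetch the PNVM file with firmware_get(9), let iwx_pnvm_parse() match a section against the device's three-word SKU ID and copy it into DMA memory, then ring the ISR6 doorbell and wait for the IWX_PNVM_COMPLETE notification.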
3913 */ 3914 static int 3915 iwx_load_pnvm(struct iwx_softc *sc) 3916 { 3917 const int wait_flags = IWX_PNVM_COMPLETE; 3918 int err = 0; 3919 const struct firmware *pnvm; 3920 3921 if (sc->sc_sku_id[0] == 0 && 3922 sc->sc_sku_id[1] == 0 && 3923 sc->sc_sku_id[2] == 0) 3924 return 0; 3925 3926 if (sc->sc_pnvm_name) { 3927 if (sc->pnvm_dma.vaddr == NULL) { 3928 IWX_UNLOCK(sc); 3929 pnvm = firmware_get(sc->sc_pnvm_name); 3930 if (pnvm == NULL) { 3931 printf("%s: could not read %s (error %d)\n", 3932 DEVNAME(sc), sc->sc_pnvm_name, err); 3933 IWX_LOCK(sc); 3934 return EINVAL; 3935 } 3936 sc->sc_pnvm = pnvm; 3937 3938 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize); 3939 IWX_LOCK(sc); 3940 if (err && err != ENOENT) { 3941 return EINVAL; 3942 } 3943 } else 3944 iwx_ctxt_info_gen3_set_pnvm(sc); 3945 } 3946 3947 if (!iwx_nic_lock(sc)) { 3948 return EBUSY; 3949 } 3950 3951 /* 3952 * If we don't have a platform NVM file simply ask firmware 3953 * to proceed without it. 3954 */ 3955 3956 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6, 3957 IWX_UREG_DOORBELL_TO_ISR6_PNVM); 3958 3959 /* Wait for the pnvm complete notification from firmware. */ 3960 while ((sc->sc_init_complete & wait_flags) != wait_flags) { 3961 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz); 3962 if (err) 3963 break; 3964 } 3965 3966 iwx_nic_unlock(sc); 3967 3968 return err; 3969 } 3970 3971 static int 3972 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant) 3973 { 3974 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = { 3975 .valid = htole32(valid_tx_ant), 3976 }; 3977 3978 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD, 3979 0, sizeof(tx_ant_cmd), &tx_ant_cmd); 3980 } 3981 3982 static int 3983 iwx_send_phy_cfg_cmd(struct iwx_softc *sc) 3984 { 3985 struct iwx_phy_cfg_cmd phy_cfg_cmd; 3986 3987 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config); 3988 phy_cfg_cmd.calib_control.event_trigger = 3989 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger; 3990 phy_cfg_cmd.calib_control.flow_trigger = 3991 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger; 3992 3993 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0, 3994 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 3995 } 3996 3997 static int 3998 iwx_send_dqa_cmd(struct iwx_softc *sc) 3999 { 4000 struct iwx_dqa_enable_cmd dqa_cmd = { 4001 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE), 4002 }; 4003 uint32_t cmd_id; 4004 4005 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0); 4006 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); 4007 } 4008 4009 static int 4010 iwx_load_ucode_wait_alive(struct iwx_softc *sc) 4011 { 4012 int err; 4013 4014 IWX_UNLOCK(sc); 4015 err = iwx_read_firmware(sc); 4016 IWX_LOCK(sc); 4017 if (err) 4018 return err; 4019 4020 err = iwx_start_fw(sc); 4021 if (err) 4022 return err; 4023 4024 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4025 err = iwx_load_pnvm(sc); 4026 if (err) 4027 return err; 4028 } 4029 4030 iwx_post_alive(sc); 4031 4032 return 0; 4033 } 4034 4035 static int 4036 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm) 4037 { 4038 const int wait_flags = IWX_INIT_COMPLETE; 4039 struct iwx_nvm_access_complete_cmd nvm_complete = {}; 4040 struct iwx_init_extended_cfg_cmd init_cfg = { 4041 .init_flags = htole32(IWX_INIT_NVM), 4042 }; 4043 4044 int err; 4045 4046 if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) { 4047 printf("%s: radio is disabled by hardware switch\n", 4048 DEVNAME(sc)); 4049 return EPERM; 4050 } 4051 4052 sc->sc_init_complete = 0; 4053 err = 
iwx_load_ucode_wait_alive(sc); 4054 if (err) { 4055 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 4056 "%s: failed to load init firmware\n", DEVNAME(sc)); 4057 return err; 4058 } else { 4059 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 4060 "%s: successfully loaded init firmware\n", __func__); 4061 } 4062 4063 /* 4064 * Send init config command to mark that we are sending NVM 4065 * access commands 4066 */ 4067 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP, 4068 IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg); 4069 if (err) { 4070 printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__, 4071 err); 4072 return err; 4073 } 4074 4075 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP, 4076 IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete); 4077 if (err) { 4078 return err; 4079 } 4080 4081 /* Wait for the init complete notification from the firmware. */ 4082 while ((sc->sc_init_complete & wait_flags) != wait_flags) { 4083 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz); 4084 if (err) { 4085 DPRINTF(("%s: will return err=%d\n", __func__, err)); 4086 return err; 4087 } else { 4088 DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n", 4089 __func__)); 4090 } 4091 } 4092 4093 if (readnvm) { 4094 err = iwx_nvm_get(sc); 4095 DPRINTF(("%s: err=%d\n", __func__, err)); 4096 if (err) { 4097 printf("%s: failed to read nvm (error %d)\n", 4098 DEVNAME(sc), err); 4099 return err; 4100 } else { 4101 DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc))); 4102 } 4103 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr); 4104 } 4105 return 0; 4106 } 4107 4108 static int 4109 iwx_config_ltr(struct iwx_softc *sc) 4110 { 4111 struct iwx_ltr_config_cmd cmd = { 4112 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE), 4113 }; 4114 4115 if (!sc->sc_ltr_enabled) 4116 return 0; 4117 4118 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd); 4119 } 4120 4121 static void 4122 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx, 4123 bus_dma_segment_t *seg) 4124 { 4125 struct iwx_rx_data *data = &ring->data[idx]; 4126 4127 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4128 struct iwx_rx_transfer_desc *desc = ring->desc; 4129 desc[idx].rbid = htole16(idx & 0xffff); 4130 desc[idx].addr = htole64((*seg).ds_addr); 4131 bus_dmamap_sync(ring->data_dmat, data->map, 4132 BUS_DMASYNC_PREWRITE); 4133 } else { 4134 ((uint64_t *)ring->desc)[idx] = 4135 htole64((*seg).ds_addr); 4136 bus_dmamap_sync(ring->data_dmat, data->map, 4137 BUS_DMASYNC_PREWRITE); 4138 } 4139 } 4140 4141 static int 4142 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx) 4143 { 4144 struct iwx_rx_ring *ring = &sc->rxq; 4145 struct iwx_rx_data *data = &ring->data[idx]; 4146 struct mbuf *m; 4147 int err; 4148 int fatal = 0; 4149 bus_dma_segment_t seg; 4150 int nsegs; 4151 4152 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE); 4153 if (m == NULL) 4154 return ENOBUFS; 4155 4156 if (data->m != NULL) { 4157 bus_dmamap_unload(ring->data_dmat, data->map); 4158 fatal = 1; 4159 } 4160 4161 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 4162 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg, 4163 &nsegs, BUS_DMA_NOWAIT); 4164 if (err) { 4165 /* XXX */ 4166 if (fatal) 4167 panic("could not load RX mbuf"); 4168 m_freem(m); 4169 return err; 4170 } 4171 data->m = m; 4172 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 4173 4174 /* Update RX descriptor. 
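AX210 and later take a struct iwx_rx_transfer_desc holding a 16-bit buffer ID and a 64-bit DMA address; older devices take a bare array of 64-bit DMA addresses.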
*/ 4175 iwx_update_rx_desc(sc, ring, idx, &seg); 4176 return 0; 4177 } 4178 4179 static int 4180 iwx_rxmq_get_signal_strength(struct iwx_softc *sc, 4181 struct iwx_rx_mpdu_desc *desc) 4182 { 4183 int energy_a, energy_b; 4184 4185 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4186 energy_a = desc->v3.energy_a; 4187 energy_b = desc->v3.energy_b; 4188 } else { 4189 energy_a = desc->v1.energy_a; 4190 energy_b = desc->v1.energy_b; 4191 } 4192 energy_a = energy_a ? -energy_a : -256; 4193 energy_b = energy_b ? -energy_b : -256; 4194 return MAX(energy_a, energy_b); 4195 } 4196 4197 static void 4198 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt, 4199 struct iwx_rx_data *data) 4200 { 4201 struct iwx_rx_phy_info *phy_info = (void *)pkt->data; 4202 struct iwx_cmd_header *cmd_hdr = &pkt->hdr; 4203 int qid = cmd_hdr->qid; 4204 struct iwx_tx_ring *ring = &sc->txq[qid]; 4205 4206 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 4207 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 4208 } 4209 4210 /* 4211 * Retrieve the average noise (in dBm) among receivers. 4212 */ 4213 static int 4214 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats) 4215 { 4216 int i, total, nbant, noise; 4217 4218 total = nbant = noise = 0; 4219 for (i = 0; i < 3; i++) { 4220 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 4221 if (noise) { 4222 total += noise; 4223 nbant++; 4224 } 4225 } 4226 4227 /* There should be at least one antenna but check anyway. */ 4228 return (nbant == 0) ? -127 : (total / nbant) - 107; 4229 } 4230 4231 #if 0 4232 int 4233 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 4234 struct ieee80211_rxinfo *rxi) 4235 { 4236 struct ieee80211com *ic = &sc->sc_ic; 4237 struct ieee80211_key *k; 4238 struct ieee80211_frame *wh; 4239 uint64_t pn, *prsc; 4240 uint8_t *ivp; 4241 uint8_t tid; 4242 int hdrlen, hasqos; 4243 4244 wh = mtod(m, struct ieee80211_frame *); 4245 hdrlen = ieee80211_get_hdrlen(wh); 4246 ivp = (uint8_t *)wh + hdrlen; 4247 4248 /* find key for decryption */ 4249 k = ieee80211_get_rxkey(ic, m, ni); 4250 if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP) 4251 return 1; 4252 4253 /* Check that ExtIV bit is set. */ 4254 if (!(ivp[3] & IEEE80211_WEP_EXTIV)) 4255 return 1; 4256 4257 hasqos = ieee80211_has_qos(wh); 4258 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0; 4259 prsc = &k->k_rsc[tid]; 4260 4261 /* Extract the 48-bit PN from the CCMP header. */ 4262 pn = (uint64_t)ivp[0] | 4263 (uint64_t)ivp[1] << 8 | 4264 (uint64_t)ivp[4] << 16 | 4265 (uint64_t)ivp[5] << 24 | 4266 (uint64_t)ivp[6] << 32 | 4267 (uint64_t)ivp[7] << 40; 4268 if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) { 4269 if (pn < *prsc) { 4270 ic->ic_stats.is_ccmp_replays++; 4271 return 1; 4272 } 4273 } else if (pn <= *prsc) { 4274 ic->ic_stats.is_ccmp_replays++; 4275 return 1; 4276 } 4277 /* Last seen packet number is updated in ieee80211_inputm(). */ 4278 4279 /* 4280 * Some firmware versions strip the MIC, and some don't. It is not 4281 * clear which of the capability flags could tell us what to expect. 4282 * For now, keep things simple and just leave the MIC in place if 4283 * it is present. 4284 * 4285 * The IV will be stripped by ieee80211_inputm(). 
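(Note this block is still written against OpenBSD interfaces such as ieee80211_inputm() and k_rsc; a FreeBSD port would presumably go through net80211's ieee80211_crypto_decap() path instead.)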
4286 */ 4287 return 0; 4288 } 4289 #endif 4290 4291 static int 4292 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status) 4293 { 4294 struct ieee80211_frame *wh; 4295 int ret = 0; 4296 uint8_t type, subtype; 4297 4298 wh = mtod(m, struct ieee80211_frame *); 4299 4300 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4301 if (type == IEEE80211_FC0_TYPE_CTL) { 4302 return 0; 4303 } 4304 4305 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4306 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) { 4307 return 0; 4308 } 4309 4310 4311 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 4312 IEEE80211_FC0_TYPE_CTL) 4313 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) { 4314 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) != 4315 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { 4316 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__)); 4317 ret = 1; 4318 goto out; 4319 } 4320 /* Check whether decryption was successful or not. */ 4321 if ((rx_pkt_status & 4322 (IWX_RX_MPDU_RES_STATUS_DEC_DONE | 4323 IWX_RX_MPDU_RES_STATUS_MIC_OK)) != 4324 (IWX_RX_MPDU_RES_STATUS_DEC_DONE | 4325 IWX_RX_MPDU_RES_STATUS_MIC_OK)) { 4326 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__)); 4327 ret = 1; 4328 goto out; 4329 } 4330 } 4331 out: 4332 return ret; 4333 } 4334 4335 static void 4336 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx, 4337 uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags, 4338 uint32_t device_timestamp, uint8_t rssi) 4339 { 4340 struct ieee80211com *ic = &sc->sc_ic; 4341 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4342 struct ieee80211_frame *wh; 4343 struct ieee80211_node *ni; 4344 4345 /* 4346 * We need to turn the hardware provided channel index into a channel 4347 * and then find it in our ic_channels array 4348 */ 4349 if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) { 4350 /* 4351 * OpenBSD points this at the ibss chan, which it defaults to 4352 * channel 1 and then never touches again. Skip a step. 
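Falling back to IEEE channel 1 at least keeps the frame flowing; the loop below then maps the IEEE channel number back onto an index into ic_channels[].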
4353 */ 4354 printf("iwx: %s:%d clamping chanidx to 1 (%d)\n", __func__, __LINE__, chanidx); 4355 chanidx = 1; 4356 } 4357 4358 int channel = chanidx; 4359 for (int i = 0; i < ic->ic_nchans; i++) { 4360 if (ic->ic_channels[i].ic_ieee == channel) { 4361 chanidx = i; 4362 } 4363 } 4364 ic->ic_curchan = &ic->ic_channels[chanidx]; 4365 4366 wh = mtod(m, struct ieee80211_frame *); 4367 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 4368 4369 #if 0 /* XXX hw decrypt */ 4370 if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) && 4371 iwx_ccmp_decap(sc, m, ni, rxi) != 0) { 4372 m_freem(m); 4373 ieee80211_release_node(ic, ni); 4374 return; 4375 } 4376 #endif 4377 if (ieee80211_radiotap_active_vap(vap)) { 4378 struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap; 4379 uint16_t chan_flags; 4380 int have_legacy_rate = 1; 4381 uint8_t mcs, rate; 4382 4383 tap->wr_flags = 0; 4384 if (is_shortpre) 4385 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 4386 tap->wr_chan_freq = 4387 htole16(ic->ic_channels[chanidx].ic_freq); 4388 chan_flags = ic->ic_channels[chanidx].ic_flags; 4389 #if 0 4390 if (ic->ic_curmode != IEEE80211_MODE_11N && 4391 ic->ic_curmode != IEEE80211_MODE_11AC) { 4392 chan_flags &= ~IEEE80211_CHAN_HT; 4393 chan_flags &= ~IEEE80211_CHAN_40MHZ; 4394 } 4395 if (ic->ic_curmode != IEEE80211_MODE_11AC) 4396 chan_flags &= ~IEEE80211_CHAN_VHT; 4397 #else 4398 chan_flags &= ~IEEE80211_CHAN_HT; 4399 #endif 4400 tap->wr_chan_flags = htole16(chan_flags); 4401 tap->wr_dbm_antsignal = rssi; 4402 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 4403 tap->wr_tsft = device_timestamp; 4404 4405 if (sc->sc_rate_n_flags_version >= 2) { 4406 uint32_t mod_type = (rate_n_flags & 4407 IWX_RATE_MCS_MOD_TYPE_MSK); 4408 const struct ieee80211_rateset *rs = NULL; 4409 uint32_t ridx; 4410 have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK || 4411 mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK); 4412 mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK); 4413 ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK); 4414 if (mod_type == IWX_RATE_MCS_CCK_MSK) 4415 rs = &ieee80211_std_rateset_11b; 4416 else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK) 4417 rs = &ieee80211_std_rateset_11a; 4418 if (rs && ridx < rs->rs_nrates) { 4419 rate = (rs->rs_rates[ridx] & 4420 IEEE80211_RATE_VAL); 4421 } else 4422 rate = 0; 4423 } else { 4424 have_legacy_rate = ((rate_n_flags & 4425 (IWX_RATE_MCS_HT_MSK_V1 | 4426 IWX_RATE_MCS_VHT_MSK_V1)) == 0); 4427 mcs = (rate_n_flags & 4428 (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 | 4429 IWX_RATE_HT_MCS_NSS_MSK_V1)); 4430 rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1); 4431 } 4432 if (!have_legacy_rate) { 4433 tap->wr_rate = (0x80 | mcs); 4434 } else { 4435 switch (rate) { 4436 /* CCK rates. */ 4437 case 10: tap->wr_rate = 2; break; 4438 case 20: tap->wr_rate = 4; break; 4439 case 55: tap->wr_rate = 11; break; 4440 case 110: tap->wr_rate = 22; break; 4441 /* OFDM rates. */ 4442 case 0xd: tap->wr_rate = 12; break; 4443 case 0xf: tap->wr_rate = 18; break; 4444 case 0x5: tap->wr_rate = 24; break; 4445 case 0x7: tap->wr_rate = 36; break; 4446 case 0x9: tap->wr_rate = 48; break; 4447 case 0xb: tap->wr_rate = 72; break; 4448 case 0x1: tap->wr_rate = 96; break; 4449 case 0x3: tap->wr_rate = 108; break; 4450 /* Unknown rate: should not happen. 
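(The CCK cases are rates in 100 kbit/s units; the OFDM cases are 802.11 PLCP signal codes, e.g. 0xd is 6 Mbit/s and 0x3 is 54 Mbit/s.)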
*/ 4451 default: tap->wr_rate = 0; 4452 } 4453 // XXX hack - this needs to be rebased onto the new rate code anyway 4454 tap->wr_rate = rate; 4455 } 4456 } 4457 4458 IWX_UNLOCK(sc); 4459 if (ni == NULL) { 4460 if (ieee80211_input_mimo_all(ic, m) == -1) 4461 printf("%s:%d ieee80211_input_mimo_all returned -1\n", __func__, __LINE__); 4462 } else { 4463 4464 if (ieee80211_input_mimo(ni, m) == -1) 4465 printf("%s:%d ieee80211_input_mimo returned -1\n", __func__, __LINE__); 4466 ieee80211_free_node(ni); 4467 } 4468 IWX_LOCK(sc); 4469 } 4470 4471 static void 4472 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata, 4473 size_t maxlen) 4474 { 4475 struct ieee80211com *ic = &sc->sc_ic; 4476 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4477 struct ieee80211_node *ni = vap->iv_bss; 4478 struct ieee80211_key *k; 4479 struct ieee80211_rx_stats rxs; 4480 struct iwx_rx_mpdu_desc *desc; 4481 uint32_t len, hdrlen, rate_n_flags, device_timestamp; 4482 int rssi; 4483 uint8_t chanidx; 4484 uint16_t phy_info; 4485 size_t desc_size; 4486 int pad = 0; 4487 4488 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 4489 desc_size = sizeof(*desc); 4490 else 4491 desc_size = IWX_RX_DESC_SIZE_V1; 4492 4493 if (maxlen < desc_size) { 4494 m_freem(m); 4495 return; /* drop */ 4496 } 4497 4498 desc = (struct iwx_rx_mpdu_desc *)pktdata; 4499 4500 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) || 4501 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) { 4502 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, le16toh(desc->status)); 4503 m_freem(m); 4504 return; /* drop */ 4505 } 4506 4507 len = le16toh(desc->mpdu_len); 4508 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4509 /* Allow control frames in monitor mode. */ 4510 if (len < sizeof(struct ieee80211_frame_cts)) { 4511 m_freem(m); 4512 return; 4513 } 4514 4515 } else if (len < sizeof(struct ieee80211_frame)) { 4516 m_freem(m); 4517 return; 4518 } 4519 if (len > maxlen - desc_size) { 4520 m_freem(m); 4521 return; 4522 } 4523 4524 // pktdata is cast to char * so the pointer arithmetic below is standard C 4525 m->m_data = (char *)pktdata + desc_size; 4526 m->m_pkthdr.len = m->m_len = len; 4527 4528 /* Account for padding following the frame header. */ 4529 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) { 4530 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 4531 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4532 if (type == IEEE80211_FC0_TYPE_CTL) { 4533 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { 4534 case IEEE80211_FC0_SUBTYPE_CTS: 4535 hdrlen = sizeof(struct ieee80211_frame_cts); 4536 break; 4537 case IEEE80211_FC0_SUBTYPE_ACK: 4538 hdrlen = sizeof(struct ieee80211_frame_ack); 4539 break; 4540 default: 4541 hdrlen = sizeof(struct ieee80211_frame_min); 4542 break; 4543 } 4544 } else 4545 hdrlen = ieee80211_hdrsize(wh); 4546 4547 if ((le16toh(desc->status) & 4548 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) == 4549 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { 4550 // CCMP header length 4551 hdrlen += 8; 4552 } 4553 4554 memmove(m->m_data + 2, m->m_data, hdrlen); 4555 m_adj(m, 2); 4556 4557 } 4558 4559 if ((le16toh(desc->status) & 4560 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) == 4561 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { 4562 pad = 1; 4563 } 4564 4565 // /* 4566 // * Hardware de-aggregates A-MSDUs and copies the same MAC header 4567 // * in place for each subframe. But it leaves the 'A-MSDU present' 4568 // * bit set in the frame header. We need to clear this bit ourselves. 
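(If the bit were left set, the stack could mistake an already de-aggregated subframe for a full A-MSDU and mis-parse the payload.)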
4569 // * (XXX This workaround is not required on AX200/AX201 devices that 4570 // * have been tested by me, but it's unclear when this problem was 4571 // * fixed in the hardware. It definitely affects the 9k generation. 4572 // * Leaving this in place for now since some 9k/AX200 hybrids seem 4573 // * to exist that we may eventually add support for.) 4574 // * 4575 // * And we must allow the same CCMP PN for subframes following the 4576 // * first subframe. Otherwise they would be discarded as replays. 4577 // */ 4578 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) { 4579 DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__)); 4580 // struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 4581 // uint8_t subframe_idx = (desc->amsdu_info & 4582 // IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK); 4583 // if (subframe_idx > 0) 4584 // rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN; 4585 // if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) && 4586 // m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) { 4587 // struct ieee80211_qosframe_addr4 *qwh4 = mtod(m, 4588 // struct ieee80211_qosframe_addr4 *); 4589 // qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU); 4590 // } else if (ieee80211_has_qos(wh) && 4591 // m->m_len >= sizeof(struct ieee80211_qosframe)) { 4592 // struct ieee80211_qosframe *qwh = mtod(m, 4593 // struct ieee80211_qosframe *); 4594 // qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU); 4595 // } 4596 } 4597 4598 /* 4599 * Verify decryption before duplicate detection. The latter uses 4600 * the TID supplied in QoS frame headers and this TID is implicitly 4601 * verified as part of the CCMP nonce. 4602 */ 4603 k = ieee80211_crypto_get_txkey(ni, m); 4604 if (k != NULL && 4605 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) && 4606 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) { 4607 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__)); 4608 m_freem(m); 4609 return; 4610 } 4611 4612 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4613 rate_n_flags = le32toh(desc->v3.rate_n_flags); 4614 chanidx = desc->v3.channel; 4615 device_timestamp = le32toh(desc->v3.gp2_on_air_rise); 4616 } else { 4617 rate_n_flags = le32toh(desc->v1.rate_n_flags); 4618 chanidx = desc->v1.channel; 4619 device_timestamp = le32toh(desc->v1.gp2_on_air_rise); 4620 } 4621 4622 phy_info = le16toh(desc->phy_info); 4623 4624 rssi = iwx_rxmq_get_signal_strength(sc, desc); 4625 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */ 4626 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */ 4627 4628 memset(&rxs, 0, sizeof(rxs)); 4629 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 4630 rxs.r_flags |= IEEE80211_R_BAND; 4631 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 4632 rxs.r_flags |= IEEE80211_R_RSSI | IEEE80211_R_C_RSSI; 4633 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START; 4634 4635 rxs.c_ieee = chanidx; 4636 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, 4637 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ); 4638 rxs.c_band = chanidx <= 14 ? 
IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ; 4639 rxs.c_rx_tsf = device_timestamp; 4640 rxs.c_chain = chanidx; 4641 4642 /* rssi is in 1/2 dB units */ 4643 rxs.c_rssi = rssi * 2; 4644 rxs.c_nf = sc->sc_noise; 4645 4646 if (pad) { 4647 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED; 4648 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP; 4649 } 4650 4651 if (ieee80211_add_rx_params(m, &rxs) == 0) { 4652 printf("%s: ieee80211_add_rx_params failed\n", __func__); 4653 m_freem(m); 4654 return; 4655 } 4656 4657 4658 #if 0 4659 if (iwx_rx_reorder(sc, m, chanidx, desc, 4660 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE), 4661 rate_n_flags, device_timestamp, &rxi, ml)) 4662 return; 4663 #endif 4664 4665 if (pad) { 4666 #define TRIM 8 4667 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 4668 hdrlen = ieee80211_hdrsize(wh); 4669 memmove(m->m_data + TRIM, m->m_data, hdrlen); 4670 m_adj(m, TRIM); 4671 #undef TRIM 4672 } 4673 4674 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status), 4675 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE), 4676 rate_n_flags, device_timestamp, rssi); 4677 } 4678 4679 static void 4680 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx) 4681 { 4682 struct iwx_tfh_tfd *desc = &ring->desc[idx]; 4683 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f; 4684 int i; 4685 4686 /* First TB is never cleared - it is bidirectional DMA data. */ 4687 for (i = 1; i < num_tbs; i++) { 4688 struct iwx_tfh_tb *tb = &desc->tbs[i]; 4689 memset(tb, 0, sizeof(*tb)); 4690 } 4691 desc->num_tbs = htole16(1); 4692 4693 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4694 BUS_DMASYNC_PREWRITE); 4695 } 4696 4697 static void 4698 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring, 4699 struct iwx_tx_data *txd) 4700 { 4701 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 4702 bus_dmamap_unload(ring->data_dmat, txd->map); 4703 4704 ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0); 4705 txd->m = NULL; 4706 txd->in = NULL; 4707 } 4708 4709 static void 4710 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx) 4711 { 4712 struct iwx_tx_data *txd; 4713 4714 while (ring->tail_hw != idx) { 4715 txd = &ring->data[ring->tail]; 4716 if (txd->m != NULL) { 4717 iwx_clear_tx_desc(sc, ring, ring->tail); 4718 iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0); 4719 iwx_txd_done(sc, ring, txd); 4720 ring->queued--; 4721 if (ring->queued < 0) 4722 panic("caught negative queue count"); 4723 } 4724 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT; 4725 ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size; 4726 } 4727 } 4728 4729 static void 4730 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt, 4731 struct iwx_rx_data *data) 4732 { 4733 struct ieee80211com *ic = &sc->sc_ic; 4734 struct ifnet *ifp = IC2IFP(ic); 4735 struct iwx_cmd_header *cmd_hdr = &pkt->hdr; 4736 int qid = cmd_hdr->qid, status, txfail; 4737 struct iwx_tx_ring *ring = &sc->txq[qid]; 4738 struct iwx_tx_resp *tx_resp = (void *)pkt->data; 4739 uint32_t ssn; 4740 uint32_t len = iwx_rx_packet_len(pkt); 4741 int idx = cmd_hdr->idx; 4742 struct iwx_tx_data *txd = &ring->data[idx]; 4743 struct mbuf *m = txd->m; 4744 4745 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 4746 4747 /* Sanity checks. 
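The packet must be large enough for struct iwx_tx_resp; non-aggregation queues must not report more than one frame, and on aggregation queues it must also carry one status word per frame plus the trailing SSN. Anything shorter is discarded.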
*/ 4748 if (sizeof(*tx_resp) > len) 4749 return; 4750 if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1) 4751 return; 4752 if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) + 4753 tx_resp->frame_count * sizeof(tx_resp->status) > len) 4754 return; 4755 4756 sc->sc_tx_timer[qid] = 0; 4757 4758 if (tx_resp->frame_count > 1) /* A-MPDU */ 4759 return; 4760 4761 status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK; 4762 txfail = (status != IWX_TX_STATUS_SUCCESS && 4763 status != IWX_TX_STATUS_DIRECT_DONE); 4764 4765 #ifdef __not_yet__ 4766 /* TODO: Replace accounting below with ieee80211_tx_complete() */ 4767 ieee80211_tx_complete(&in->in_ni, m, txfail); 4768 #else 4769 if (txfail) 4770 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 4771 else { 4772 if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); 4773 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 4774 if (m->m_flags & M_MCAST) 4775 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 4776 } 4777 #endif 4778 /* 4779 * On hardware supported by iwx(4) the SSN counter corresponds 4780 * to a Tx ring index rather than a sequence number. 4781 * Frames up to this index (non-inclusive) can now be freed. 4782 */ 4783 memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn)); 4784 ssn = le32toh(ssn); 4785 if (ssn < sc->max_tfd_queue_size) { 4786 iwx_txq_advance(sc, ring, ssn); 4787 iwx_clear_oactive(sc, ring); 4788 } 4789 } 4790 4791 static void 4792 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring) 4793 { 4794 if (ring->queued < iwx_lomark) { 4795 sc->qfullmsk &= ~(1 << ring->qid); 4796 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) { 4797 /* 4798 * Well, we're in interrupt context, but then again 4799 * I guess net80211 does all sorts of stunts in 4800 * interrupt context, so maybe this is no biggie. 
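Once the ring drains below iwx_lomark the queue-full bit is cleared, and when no queue is full anymore iwx_start() pulls more frames from the send queue.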
4801 */ 4802 iwx_start(sc); 4803 } 4804 } 4805 } 4806 4807 static void 4808 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt) 4809 { 4810 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data; 4811 struct ieee80211com *ic = &sc->sc_ic; 4812 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4813 struct iwx_node *in = IWX_NODE(vap->iv_bss); 4814 struct ieee80211_node *ni = &in->in_ni; 4815 struct iwx_tx_ring *ring; 4816 uint16_t i, tfd_cnt, ra_tid_cnt, idx; 4817 int qid; 4818 4819 // if (ic->ic_state != IEEE80211_S_RUN) 4820 // return; 4821 4822 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res)) 4823 return; 4824 4825 if (ba_res->sta_id != IWX_STATION_ID) 4826 return; 4827 4828 in = (void *)ni; 4829 4830 tfd_cnt = le16toh(ba_res->tfd_cnt); 4831 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt); 4832 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) + 4833 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt + 4834 sizeof(ba_res->tfd[0]) * tfd_cnt)) 4835 return; 4836 4837 for (i = 0; i < tfd_cnt; i++) { 4838 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; 4839 uint8_t tid; 4840 4841 tid = ba_tfd->tid; 4842 if (tid >= nitems(sc->aggqid)) 4843 continue; 4844 4845 qid = sc->aggqid[tid]; 4846 if (qid != le16toh(ba_tfd->q_num)) 4847 continue; 4848 4849 ring = &sc->txq[qid]; 4850 4851 #if 0 4852 ba = &ni->ni_tx_ba[tid]; 4853 if (ba->ba_state != IEEE80211_BA_AGREED) 4854 continue; 4855 #endif 4856 idx = le16toh(ba_tfd->tfd_index); 4857 sc->sc_tx_timer[qid] = 0; 4858 iwx_txq_advance(sc, ring, idx); 4859 iwx_clear_oactive(sc, ring); 4860 } 4861 } 4862 4863 static void 4864 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt, 4865 struct iwx_rx_data *data) 4866 { 4867 struct ieee80211com *ic = &sc->sc_ic; 4868 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4869 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data; 4870 uint32_t missed; 4871 4872 if ((ic->ic_opmode != IEEE80211_M_STA) || 4873 (vap->iv_state != IEEE80211_S_RUN)) 4874 return; 4875 4876 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 4877 BUS_DMASYNC_POSTREAD); 4878 4879 missed = le32toh(mbn->consec_missed_beacons_since_last_rx); 4880 if (missed > vap->iv_bmissthreshold) { 4881 ieee80211_beacon_miss(ic); 4882 } 4883 4884 } 4885 4886 static int 4887 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action) 4888 { 4889 struct iwx_binding_cmd cmd; 4890 struct ieee80211com *ic = &sc->sc_ic; 4891 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4892 struct iwx_vap *ivp = IWX_VAP(vap); 4893 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt; 4894 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color); 4895 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE); 4896 uint32_t status; 4897 4898 if (action == IWX_FW_CTXT_ACTION_ADD && active) 4899 panic("binding already added"); 4900 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active) 4901 panic("binding already removed"); 4902 4903 if (phyctxt == NULL) /* XXX race with iwx_stop() */ 4904 return EINVAL; 4905 4906 memset(&cmd, 0, sizeof(cmd)); 4907 4908 cmd.id_and_color 4909 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); 4910 cmd.action = htole32(action); 4911 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); 4912 4913 cmd.macs[0] = htole32(mac_id); 4914 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++) 4915 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID); 4916 4917 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) || 4918 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)) 4919 
cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX); 4920 else 4921 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX); 4922 4923 status = 0; 4924 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd), 4925 &cmd, &status); 4926 if (err == 0 && status != 0) 4927 err = EIO; 4928 4929 return err; 4930 } 4931 4932 static uint8_t 4933 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan) 4934 { 4935 int ctlchan = ieee80211_chan2ieee(ic, chan); 4936 int midpoint = chan->ic_vht_ch_freq1; 4937 4938 /* 4939 * The FW is expected to check the control channel position only 4940 * when in HT/VHT and the channel width is not 20MHz. Return 4941 * this value as the default one: 4942 */ 4943 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 4944 4945 switch (ctlchan - midpoint) { 4946 case -6: 4947 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW; 4948 break; 4949 case -2: 4950 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 4951 break; 4952 case 2: 4953 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE; 4954 break; 4955 case 6: 4956 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE; 4957 break; 4958 default: 4959 break; 4960 } 4961 4962 return pos; 4963 } 4964 4965 static int 4966 iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt, 4967 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco, 4968 uint8_t vht_chan_width, int cmdver) 4969 { 4970 struct ieee80211com *ic = &sc->sc_ic; 4971 struct iwx_phy_context_cmd_uhb cmd; 4972 uint8_t active_cnt, idle_cnt; 4973 struct ieee80211_channel *chan = ctxt->channel; 4974 4975 memset(&cmd, 0, sizeof(cmd)); 4976 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id, 4977 ctxt->color)); 4978 cmd.action = htole32(action); 4979 4980 if (IEEE80211_IS_CHAN_2GHZ(chan) || 4981 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)) 4982 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX); 4983 else 4984 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX); 4985 4986 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ? 
4987 IWX_PHY_BAND_24 : IWX_PHY_BAND_5; 4988 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan)); 4989 4990 if (IEEE80211_IS_CHAN_VHT80(chan)) { 4991 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan); 4992 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80; 4993 } else if (IEEE80211_IS_CHAN_HT40(chan)) { 4994 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40; 4995 if (IEEE80211_IS_CHAN_HT40D(chan)) 4996 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE; 4997 else 4998 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 4999 } else { 5000 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20; 5001 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5002 } 5003 5004 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 5005 IWX_RLC_CONFIG_CMD) != 2) { 5006 idle_cnt = chains_static; 5007 active_cnt = chains_dynamic; 5008 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) << 5009 IWX_PHY_RX_CHAIN_VALID_POS); 5010 cmd.rxchain_info |= htole32(idle_cnt << 5011 IWX_PHY_RX_CHAIN_CNT_POS); 5012 cmd.rxchain_info |= htole32(active_cnt << 5013 IWX_PHY_RX_CHAIN_MIMO_CNT_POS); 5014 } 5015 5016 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); 5017 } 5018 5019 #if 0 5020 int 5021 iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt, 5022 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco, 5023 uint8_t vht_chan_width, int cmdver) 5024 { 5025 struct ieee80211com *ic = &sc->sc_ic; 5026 struct iwx_phy_context_cmd cmd; 5027 uint8_t active_cnt, idle_cnt; 5028 struct ieee80211_channel *chan = ctxt->channel; 5029 5030 memset(&cmd, 0, sizeof(cmd)); 5031 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id, 5032 ctxt->color)); 5033 cmd.action = htole32(action); 5034 5035 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) || 5036 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)) 5037 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX); 5038 else 5039 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX); 5040 5041 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ? 
5042 IWX_PHY_BAND_24 : IWX_PHY_BAND_5; 5043 cmd.ci.channel = ieee80211_chan2ieee(ic, chan); 5044 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) { 5045 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan); 5046 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80; 5047 } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) { 5048 if (sco == IEEE80211_HTOP0_SCO_SCA) { 5049 /* secondary chan above -> control chan below */ 5050 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5051 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40; 5052 } else if (sco == IEEE80211_HTOP0_SCO_SCB) { 5053 /* secondary chan below -> control chan above */ 5054 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE; 5055 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40; 5056 } else { 5057 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20; 5058 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5059 } 5060 } else { 5061 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20; 5062 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5063 } 5064 5065 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 5066 IWX_RLC_CONFIG_CMD) != 2) { 5067 idle_cnt = chains_static; 5068 active_cnt = chains_dynamic; 5069 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) << 5070 IWX_PHY_RX_CHAIN_VALID_POS); 5071 cmd.rxchain_info |= htole32(idle_cnt << 5072 IWX_PHY_RX_CHAIN_CNT_POS); 5073 cmd.rxchain_info |= htole32(active_cnt << 5074 IWX_PHY_RX_CHAIN_MIMO_CNT_POS); 5075 } 5076 5077 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); 5078 } 5079 #endif 5080 5081 static int 5082 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt, 5083 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, 5084 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width) 5085 { 5086 int cmdver; 5087 5088 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD); 5089 if (cmdver != 3 && cmdver != 4) { 5090 printf("%s: firmware does not support phy-context-cmd v3/v4\n", 5091 DEVNAME(sc)); 5092 return ENOTSUP; 5093 } 5094 5095 /* 5096 * Intel increased the size of the fw_channel_info struct and neglected 5097 * to bump the phy_context_cmd struct, which contains an fw_channel_info 5098 * member in the middle. 5099 * To keep things simple we use a separate function to handle the larger 5100 * variant of the phy context command. 5101 */ 5102 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) { 5103 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static, 5104 chains_dynamic, action, sco, vht_chan_width, cmdver); 5105 } else 5106 panic("Unsupported old hardware contact thj@"); 5107 5108 #if 0 5109 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic, 5110 action, sco, vht_chan_width, cmdver); 5111 #endif 5112 } 5113 5114 static int 5115 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd) 5116 { 5117 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE]; 5118 struct iwx_tfh_tfd *desc; 5119 struct iwx_tx_data *txdata; 5120 struct iwx_device_cmd *cmd; 5121 struct mbuf *m; 5122 bus_addr_t paddr; 5123 uint64_t addr; 5124 int err = 0, i, paylen, off/*, s*/; 5125 int idx, code, async, group_id; 5126 size_t hdrlen, datasz; 5127 uint8_t *data; 5128 int generation = sc->sc_generation; 5129 bus_dma_segment_t seg[10]; 5130 int nsegs; 5131 5132 code = hcmd->id; 5133 async = hcmd->flags & IWX_CMD_ASYNC; 5134 idx = ring->cur; 5135 5136 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) { 5137 paylen += hcmd->len[i]; 5138 } 5139 5140 /* If this command waits for a response, allocate response buffer. 
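The buffer is keyed by ring index; the Rx path is expected to copy the firmware's reply into sc_cmd_resp_pkt[idx] before the sleeper below is woken.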
*/ 5141 hcmd->resp_pkt = NULL; 5142 if (hcmd->flags & IWX_CMD_WANT_RESP) { 5143 uint8_t *resp_buf; 5144 KASSERT(!async, ("async command want response")); 5145 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet), 5146 ("wrong pkt len 1")); 5147 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX, 5148 ("wrong pkt len 2")); 5149 if (sc->sc_cmd_resp_pkt[idx] != NULL) 5150 return ENOSPC; 5151 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF, 5152 M_NOWAIT | M_ZERO); 5153 if (resp_buf == NULL) 5154 return ENOMEM; 5155 sc->sc_cmd_resp_pkt[idx] = resp_buf; 5156 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len; 5157 } else { 5158 sc->sc_cmd_resp_pkt[idx] = NULL; 5159 } 5160 5161 desc = &ring->desc[idx]; 5162 txdata = &ring->data[idx]; 5163 5164 /* 5165 * XXX Intel inside (tm) 5166 * Firmware API versions >= 50 reject old-style commands in 5167 * group 0 with a "BAD_COMMAND" firmware error. We must pretend 5168 * that such commands were in the LONG_GROUP instead in order 5169 * for firmware to accept them. 5170 */ 5171 if (iwx_cmd_groupid(code) == 0) { 5172 code = IWX_WIDE_ID(IWX_LONG_GROUP, code); 5173 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW; 5174 } else 5175 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW; 5176 5177 group_id = iwx_cmd_groupid(code); 5178 5179 hdrlen = sizeof(cmd->hdr_wide); 5180 datasz = sizeof(cmd->data_wide); 5181 5182 if (paylen > datasz) { 5183 /* Command is too large to fit in pre-allocated space. */ 5184 size_t totlen = hdrlen + paylen; 5185 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) { 5186 printf("%s: firmware command too long (%zu bytes)\n", 5187 DEVNAME(sc), totlen); 5188 err = EINVAL; 5189 goto out; 5190 } 5191 if (totlen > IWX_RBUF_SIZE) 5192 panic("totlen > IWX_RBUF_SIZE"); 5193 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE); 5194 if (m == NULL) { 5195 printf("%s: could not get fw cmd mbuf (%i bytes)\n", 5196 DEVNAME(sc), IWX_RBUF_SIZE); 5197 err = ENOMEM; 5198 goto out; 5199 } 5200 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 5201 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m, 5202 seg, &nsegs, BUS_DMA_NOWAIT); 5203 if (err) { 5204 printf("%s: could not load fw cmd mbuf (%zu bytes)\n", 5205 DEVNAME(sc), totlen); 5206 m_freem(m); 5207 goto out; 5208 } 5209 if (nsegs > nitems(seg)) 5210 panic("nsegs > nitems(seg)"); 5211 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs)); 5212 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */ 5213 cmd = mtod(m, struct iwx_device_cmd *); 5214 paddr = seg[0].ds_addr; 5215 } else { 5216 cmd = &ring->cmd[idx]; 5217 paddr = txdata->cmd_paddr; 5218 } 5219 5220 memset(cmd, 0, sizeof(*cmd)); 5221 cmd->hdr_wide.opcode = iwx_cmd_opcode(code); 5222 cmd->hdr_wide.group_id = group_id; 5223 cmd->hdr_wide.qid = ring->qid; 5224 cmd->hdr_wide.idx = idx; 5225 cmd->hdr_wide.length = htole16(paylen); 5226 cmd->hdr_wide.version = iwx_cmd_version(code); 5227 data = cmd->data_wide; 5228 5229 for (i = 0, off = 0; i < nitems(hcmd->data); i++) { 5230 if (hcmd->len[i] == 0) 5231 continue; 5232 memcpy(data + off, hcmd->data[i], hcmd->len[i]); 5233 off += hcmd->len[i]; 5234 } 5235 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen)); 5236 5237 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE)); 5238 addr = htole64(paddr); 5239 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr)); 5240 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) { 5241 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen, 5242 paylen)); 5243 desc->tbs[1].tb_len = htole16(hdrlen + paylen - 5244 IWX_FIRST_TB_SIZE); 5245 addr = htole64(paddr + IWX_FIRST_TB_SIZE); 
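/* The 64-bit addr field of a TB is not naturally aligned within the TFD, hence the memcpy instead of a plain store. */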
5246 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr)); 5247 desc->num_tbs = htole16(2); 5248 } else 5249 desc->num_tbs = htole16(1); 5250 5251 if (paylen > datasz) { 5252 bus_dmamap_sync(ring->data_dmat, txdata->map, 5253 BUS_DMASYNC_PREWRITE); 5254 } else { 5255 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 5256 BUS_DMASYNC_PREWRITE); 5257 } 5258 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 5259 BUS_DMASYNC_PREWRITE); 5260 5261 /* Kick command ring. */ 5262 ring->queued++; 5263 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT; 5264 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size; 5265 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw)); 5266 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw); 5267 5268 if (!async) { 5269 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz); 5270 if (err == 0) { 5271 /* if hardware is no longer up, return error */ 5272 if (generation != sc->sc_generation) { 5273 err = ENXIO; 5274 goto out; 5275 } 5276 5277 /* Response buffer will be freed in iwx_free_resp(). */ 5278 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx]; 5279 sc->sc_cmd_resp_pkt[idx] = NULL; 5280 } else if (generation == sc->sc_generation) { 5281 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF); 5282 sc->sc_cmd_resp_pkt[idx] = NULL; 5283 } 5284 } 5285 out: 5286 return err; 5287 } 5288 5289 static int 5290 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags, 5291 uint16_t len, const void *data) 5292 { 5293 struct iwx_host_cmd cmd = { 5294 .id = id, 5295 .len = { len, }, 5296 .data = { data, }, 5297 .flags = flags, 5298 }; 5299 5300 return iwx_send_cmd(sc, &cmd); 5301 } 5302 5303 static int 5304 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd, 5305 uint32_t *status) 5306 { 5307 struct iwx_rx_packet *pkt; 5308 struct iwx_cmd_response *resp; 5309 int err, resp_len; 5310 5311 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP")); 5312 cmd->flags |= IWX_CMD_WANT_RESP; 5313 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp); 5314 5315 err = iwx_send_cmd(sc, cmd); 5316 if (err) 5317 return err; 5318 5319 pkt = cmd->resp_pkt; 5320 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) 5321 return EIO; 5322 5323 resp_len = iwx_rx_packet_payload_len(pkt); 5324 if (resp_len != sizeof(*resp)) { 5325 iwx_free_resp(sc, cmd); 5326 return EIO; 5327 } 5328 5329 resp = (void *)pkt->data; 5330 *status = le32toh(resp->status); 5331 iwx_free_resp(sc, cmd); 5332 return err; 5333 } 5334 5335 static int 5336 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len, 5337 const void *data, uint32_t *status) 5338 { 5339 struct iwx_host_cmd cmd = { 5340 .id = id, 5341 .len = { len, }, 5342 .data = { data, }, 5343 }; 5344 5345 return iwx_send_cmd_status(sc, &cmd, status); 5346 } 5347 5348 static void 5349 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd) 5350 { 5351 KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP, 5352 ("hcmd flags !IWX_CMD_WANT_RESP")); 5353 free(hcmd->resp_pkt, M_DEVBUF); 5354 hcmd->resp_pkt = NULL; 5355 } 5356 5357 static void 5358 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code) 5359 { 5360 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE]; 5361 struct iwx_tx_data *data; 5362 5363 if (qid != IWX_DQA_CMD_QUEUE) { 5364 return; /* Not a command ack. 
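Frame Tx completions are delivered via iwx_rx_tx_cmd() instead.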
*/ 5365 } 5366 5367 data = &ring->data[idx]; 5368 5369 if (data->m != NULL) { 5370 bus_dmamap_sync(ring->data_dmat, data->map, 5371 BUS_DMASYNC_POSTWRITE); 5372 bus_dmamap_unload(ring->data_dmat, data->map); 5373 m_freem(data->m); 5374 data->m = NULL; 5375 } 5376 wakeup(&ring->desc[idx]); 5377 5378 DPRINTF(("%s: command 0x%x done\n", __func__, code)); 5379 if (ring->queued == 0) { 5380 DPRINTF(("%s: unexpected firmware response to command 0x%x\n", 5381 DEVNAME(sc), code)); 5382 } else if (ring->queued > 0) 5383 ring->queued--; 5384 } 5385 5386 static uint32_t 5387 iwx_fw_rateidx_ofdm(uint8_t rval) 5388 { 5389 /* Firmware expects indices which match our 11a rate set. */ 5390 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a; 5391 int i; 5392 5393 for (i = 0; i < rs->rs_nrates; i++) { 5394 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval) 5395 return i; 5396 } 5397 5398 return 0; 5399 } 5400 5401 static uint32_t 5402 iwx_fw_rateidx_cck(uint8_t rval) 5403 { 5404 /* Firmware expects indices which match our 11b rate set. */ 5405 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b; 5406 int i; 5407 5408 for (i = 0; i < rs->rs_nrates; i++) { 5409 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval) 5410 return i; 5411 } 5412 5413 return 0; 5414 } 5415 5416 static int 5417 iwx_min_basic_rate(struct ieee80211com *ic) 5418 { 5419 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5420 struct ieee80211_node *ni = vap->iv_bss; 5421 struct ieee80211_rateset *rs = &ni->ni_rates; 5422 struct ieee80211_channel *c = ni->ni_chan; 5423 int i, min, rval; 5424 5425 min = -1; 5426 5427 if (c == IEEE80211_CHAN_ANYC) { 5428 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__); 5429 return -1; 5430 } 5431 5432 for (i = 0; i < rs->rs_nrates; i++) { 5433 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0) 5434 continue; 5435 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 5436 if (min == -1) 5437 min = rval; 5438 else if (rval < min) 5439 min = rval; 5440 } 5441 5442 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */ 5443 if (min == -1) 5444 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12; 5445 5446 return min; 5447 } 5448 5449 /* 5450 * Determine the Tx command flags and Tx rate+flags to use. 5451 * Return the selected Tx rate. 
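Returns NULL when no rate can be selected (no channel is set yet); the caller is expected to drop the frame in that case.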
5452  */
5453 static const struct iwx_rate *
5454 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5455     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5456     struct mbuf *m)
5457 {
5458 	struct ieee80211com *ic = &sc->sc_ic;
5459 	struct ieee80211_node *ni = &in->in_ni;
5460 	struct ieee80211_rateset *rs = &ni->ni_rates;
5461 	const struct iwx_rate *rinfo = NULL;
5462 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5463 	int ridx = iwx_min_basic_rate(ic);
5464 	int min_ridx, rate_flags;
5465 	uint8_t rval;
5466 
5467 	/* The node is being torn down and no channel is set; bail out. */
5468 	if (ridx == -1)
5469 		return NULL;
5470 
5471 	min_ridx = iwx_rval2ridx(ridx);
5472 
5473 	*flags = 0;
5474 
5475 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5476 	    type != IEEE80211_FC0_TYPE_DATA) {
5477 		/* for non-data, use the lowest supported rate */
5478 		ridx = min_ridx;
5479 		*flags |= IWX_TX_FLAGS_CMD_RATE;
5480 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5481 		ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5482 		    & ~IEEE80211_RATE_MCS];
5483 	} else {
5484 		rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5485 		    & IEEE80211_RATE_VAL);
5486 		ridx = iwx_rval2ridx(rval);
5487 		if (ridx < min_ridx)
5488 			ridx = min_ridx;
5489 	}
5490 
5491 	if (m->m_flags & M_EAPOL)
5492 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
5493 
5494 	rinfo = &iwx_rates[ridx];
5495 
5496 	/*
5497 	 * Do not fill rate_n_flags if firmware controls the Tx rate.
5498 	 * For data frames we rely on Tx rate scaling in firmware by default.
5499 	 */
5500 	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5501 		*rate_n_flags = 0;
5502 		return rinfo;
5503 	}
5504 
5505 	/*
5506 	 * Forcing a CCK/OFDM legacy rate is important for management frames.
5507 	 * Association will only succeed if we do this correctly.
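	 * CCK rates only exist on 2GHz; an association request sent to a
	 * 5GHz AP, for instance, must go out on an OFDM rate or it will
	 * never be received.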
5508 	 */
5509 
5510 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5511 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5512 	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5513 	if (IWX_RIDX_IS_CCK(ridx)) {
5514 		if (sc->sc_rate_n_flags_version >= 2)
5515 			rate_flags |= IWX_RATE_MCS_CCK_MSK;
5516 		else
5517 			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5518 	} else if (sc->sc_rate_n_flags_version >= 2)
5519 		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5520 
5521 	rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5522 	    & IEEE80211_RATE_VAL);
5523 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5524 	    rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5525 
5526 	if (sc->sc_rate_n_flags_version >= 2) {
5527 		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5528 			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5529 			    IWX_RATE_LEGACY_RATE_MSK);
5530 		} else {
5531 			rate_flags |= (iwx_fw_rateidx_cck(rval) &
5532 			    IWX_RATE_LEGACY_RATE_MSK);
5533 		}
5534 	} else
5535 		rate_flags |= rinfo->plcp;
5536 
5537 	*rate_n_flags = rate_flags;
5538 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5539 	    __func__, __LINE__, *flags);
5540 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5541 	    __func__, __LINE__, *rate_n_flags);
5542 
5543 	if (sc->sc_debug & IWX_DEBUG_TXRATE)
5544 		print_ratenflags(__func__, __LINE__,
5545 		    *rate_n_flags, sc->sc_rate_n_flags_version);
5546 
5547 	return rinfo;
5548 }
5549 
5550 static void
5551 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
5552     int idx, uint16_t byte_cnt, uint16_t num_tbs)
5553 {
5554 	uint8_t filled_tfd_size, num_fetch_chunks;
5555 	uint16_t len = byte_cnt;
5556 	uint16_t bc_ent;
5557 
5558 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5559 	    num_tbs * sizeof(struct iwx_tfh_tb);
5560 	/*
5561 	 * filled_tfd_size contains the number of filled bytes in the TFD.
5562 	 * Dividing it by 64 will give the number of chunks to fetch
5563 	 * to SRAM: 0 for one chunk, 1 for two, and so on.
5564 * If, for example, TFD contains only 3 TBs then 32 bytes 5565 * of the TFD are used, and only one chunk of 64 bytes should 5566 * be fetched 5567 */ 5568 num_fetch_chunks = howmany(filled_tfd_size, 64) - 1; 5569 5570 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 5571 struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr; 5572 /* Starting from AX210, the HW expects bytes */ 5573 bc_ent = htole16(len | (num_fetch_chunks << 14)); 5574 scd_bc_tbl[idx].tfd_offset = bc_ent; 5575 } else { 5576 struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr; 5577 /* Before AX210, the HW expects DW */ 5578 len = howmany(len, 4); 5579 bc_ent = htole16(len | (num_fetch_chunks << 12)); 5580 scd_bc_tbl->tfd_offset[idx] = bc_ent; 5581 } 5582 5583 bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE); 5584 } 5585 5586 static int 5587 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 5588 { 5589 struct ieee80211com *ic = &sc->sc_ic; 5590 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5591 struct iwx_node *in = (void *)ni; 5592 struct iwx_tx_ring *ring; 5593 struct iwx_tx_data *data; 5594 struct iwx_tfh_tfd *desc; 5595 struct iwx_device_cmd *cmd; 5596 struct ieee80211_frame *wh; 5597 struct ieee80211_key *k = NULL; 5598 const struct iwx_rate *rinfo; 5599 uint64_t paddr; 5600 u_int hdrlen; 5601 uint32_t rate_n_flags; 5602 uint16_t num_tbs, flags, offload_assist = 0; 5603 uint8_t type, subtype; 5604 int i, totlen, err, pad, qid; 5605 #define IWM_MAX_SCATTER 20 5606 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 5607 int nsegs; 5608 struct mbuf *m1; 5609 size_t txcmd_size; 5610 5611 wh = mtod(m, struct ieee80211_frame *); 5612 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 5613 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 5614 hdrlen = ieee80211_anyhdrsize(wh); 5615 5616 qid = sc->first_data_qid; 5617 5618 /* Put QoS frames on the data queue which maps to their TID. 
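	 * Once a Tx aggregation session has been established,
	 * sc->aggqid[tid] maps the TID to its firmware-assigned queue;
	 * all other frames stay on first_data_qid.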
*/ 5619 if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) { 5620 uint16_t qos = ieee80211_gettid(wh); 5621 uint8_t tid = qos & IEEE80211_QOS_TID; 5622 #if 0 5623 /* 5624 * XXX-THJ: TODO when we enable ba we need to manage the 5625 * mappings 5626 */ 5627 struct ieee80211_tx_ba *ba; 5628 ba = &ni->ni_tx_ba[tid]; 5629 5630 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 5631 type == IEEE80211_FC0_TYPE_DATA && 5632 subtype != IEEE80211_FC0_SUBTYPE_NODATA && 5633 subtype != IEEE80211_FC0_SUBTYPE_BAR && 5634 sc->aggqid[tid] != 0 /*&& 5635 ba->ba_state == IEEE80211_BA_AGREED*/) { 5636 qid = sc->aggqid[tid]; 5637 #else 5638 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 5639 type == IEEE80211_FC0_TYPE_DATA && 5640 subtype != IEEE80211_FC0_SUBTYPE_NODATA && 5641 sc->aggqid[tid] != 0) { 5642 qid = sc->aggqid[tid]; 5643 #endif 5644 } 5645 } 5646 5647 ring = &sc->txq[qid]; 5648 desc = &ring->desc[ring->cur]; 5649 memset(desc, 0, sizeof(*desc)); 5650 data = &ring->data[ring->cur]; 5651 5652 cmd = &ring->cmd[ring->cur]; 5653 cmd->hdr.code = IWX_TX_CMD; 5654 cmd->hdr.flags = 0; 5655 cmd->hdr.qid = ring->qid; 5656 cmd->hdr.idx = ring->cur; 5657 5658 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m); 5659 if (rinfo == NULL) 5660 return EINVAL; 5661 5662 if (ieee80211_radiotap_active_vap(vap)) { 5663 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap; 5664 5665 tap->wt_flags = 0; 5666 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 5667 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 5668 tap->wt_rate = rinfo->rate; 5669 if (k != NULL) 5670 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 5671 ieee80211_radiotap_tx(vap, m); 5672 } 5673 5674 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 5675 k = ieee80211_crypto_get_txkey(ni, m); 5676 if (k == NULL) { 5677 printf("%s: k is NULL!\n", __func__); 5678 m_freem(m); 5679 return (ENOBUFS); 5680 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) { 5681 k->wk_keytsc++; 5682 } else { 5683 k->wk_cipher->ic_encap(k, m); 5684 5685 /* 802.11 headers may have moved */ 5686 wh = mtod(m, struct ieee80211_frame *); 5687 flags |= IWX_TX_FLAGS_ENCRYPT_DIS; 5688 } 5689 } else 5690 flags |= IWX_TX_FLAGS_ENCRYPT_DIS; 5691 5692 totlen = m->m_pkthdr.len; 5693 5694 if (hdrlen & 3) { 5695 /* First segment length must be a multiple of 4. */ 5696 pad = 4 - (hdrlen & 3); 5697 offload_assist |= IWX_TX_CMD_OFFLD_PAD; 5698 } else 5699 pad = 0; 5700 5701 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 5702 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data; 5703 memset(tx, 0, sizeof(*tx)); 5704 tx->len = htole16(totlen); 5705 tx->offload_assist = htole32(offload_assist); 5706 tx->flags = htole16(flags); 5707 tx->rate_n_flags = htole32(rate_n_flags); 5708 memcpy(tx->hdr, wh, hdrlen); 5709 txcmd_size = sizeof(*tx); 5710 } else { 5711 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data; 5712 memset(tx, 0, sizeof(*tx)); 5713 tx->len = htole16(totlen); 5714 tx->offload_assist = htole16(offload_assist); 5715 tx->flags = htole32(flags); 5716 tx->rate_n_flags = htole32(rate_n_flags); 5717 memcpy(tx->hdr, wh, hdrlen); 5718 txcmd_size = sizeof(*tx); 5719 } 5720 5721 /* Trim 802.11 header. */ 5722 m_adj(m, hdrlen); 5723 5724 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 5725 &nsegs, BUS_DMA_NOWAIT); 5726 if (err && err != EFBIG) { 5727 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err); 5728 m_freem(m); 5729 return err; 5730 } 5731 if (err) { 5732 /* Too many DMA segments, linearize mbuf. 
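		 * m_collapse() rewrites the chain into at most
		 * IWM_MAX_SCATTER - 2 segments; the first two TBs of the Tx
		 * descriptor are reserved for the command header.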
*/ 5733 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2); 5734 if (m1 == NULL) { 5735 printf("%s: could not defrag mbufs\n", __func__); 5736 m_freem(m); 5737 return (ENOBUFS); 5738 } 5739 m = m1; 5740 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 5741 segs, &nsegs, BUS_DMA_NOWAIT); 5742 if (err) { 5743 printf("%s: can't map mbuf (error %d)\n", __func__, 5744 err); 5745 m_freem(m); 5746 return (err); 5747 } 5748 } 5749 data->m = m; 5750 data->in = in; 5751 5752 /* Fill TX descriptor. */ 5753 num_tbs = 2 + nsegs; 5754 desc->num_tbs = htole16(num_tbs); 5755 5756 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE); 5757 paddr = htole64(data->cmd_paddr); 5758 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr)); 5759 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32) 5760 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__)); 5761 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) + 5762 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE); 5763 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE); 5764 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr)); 5765 5766 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32) 5767 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__)); 5768 5769 /* Other DMA segments are for data payload. */ 5770 for (i = 0; i < nsegs; i++) { 5771 seg = &segs[i]; 5772 desc->tbs[i + 2].tb_len = htole16(seg->ds_len); 5773 paddr = htole64(seg->ds_addr); 5774 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr)); 5775 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32) 5776 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2)); 5777 } 5778 5779 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 5780 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 5781 BUS_DMASYNC_PREWRITE); 5782 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 5783 BUS_DMASYNC_PREWRITE); 5784 5785 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs); 5786 5787 /* Kick TX ring. */ 5788 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT; 5789 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size; 5790 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw); 5791 5792 /* Mark TX ring as full if we reach a certain threshold. */ 5793 if (++ring->queued > iwx_himark) { 5794 sc->qfullmsk |= 1 << ring->qid; 5795 } 5796 5797 sc->sc_tx_timer[ring->qid] = 15; 5798 5799 return 0; 5800 } 5801 5802 static int 5803 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids) 5804 { 5805 struct iwx_rx_packet *pkt; 5806 struct iwx_tx_path_flush_cmd_rsp *resp; 5807 struct iwx_tx_path_flush_cmd flush_cmd = { 5808 .sta_id = htole32(sta_id), 5809 .tid_mask = htole16(tids), 5810 }; 5811 struct iwx_host_cmd hcmd = { 5812 .id = IWX_TXPATH_FLUSH, 5813 .len = { sizeof(flush_cmd), }, 5814 .data = { &flush_cmd, }, 5815 .flags = IWX_CMD_WANT_RESP, 5816 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp), 5817 }; 5818 int err, resp_len, i, num_flushed_queues; 5819 5820 err = iwx_send_cmd(sc, &hcmd); 5821 if (err) 5822 return err; 5823 5824 pkt = hcmd.resp_pkt; 5825 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) { 5826 err = EIO; 5827 goto out; 5828 } 5829 5830 resp_len = iwx_rx_packet_payload_len(pkt); 5831 /* Some firmware versions don't provide a response. 
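	 * An empty payload is treated as success; only a response of
	 * unexpected non-zero size is an error.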
 */
5832 	if (resp_len == 0)
5833 		goto out;
5834 	else if (resp_len != sizeof(*resp)) {
5835 		err = EIO;
5836 		goto out;
5837 	}
5838 
5839 	resp = (void *)pkt->data;
5840 
5841 	if (le16toh(resp->sta_id) != sta_id) {
5842 		err = EIO;
5843 		goto out;
5844 	}
5845 
5846 	num_flushed_queues = le16toh(resp->num_flushed_queues);
5847 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
5848 		err = EIO;
5849 		goto out;
5850 	}
5851 
5852 	for (i = 0; i < num_flushed_queues; i++) {
5853 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
5854 		uint16_t tid = le16toh(queue_info->tid);
5855 		uint16_t read_after = le16toh(queue_info->read_after_flush);
5856 		uint16_t qid = le16toh(queue_info->queue_num);
5857 		struct iwx_tx_ring *txq;
5858 
5859 		if (qid >= nitems(sc->txq))
5860 			continue;
5861 
5862 		txq = &sc->txq[qid];
5863 		if (tid != txq->tid)
5864 			continue;
5865 
5866 		iwx_txq_advance(sc, txq, read_after);
5867 	}
5868 out:
5869 	iwx_free_resp(sc, &hcmd);
5870 	return err;
5871 }
5872 
5873 #define IWX_FLUSH_WAIT_MS	2000
5874 
5875 static int
5876 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *in, int drain)
5877 {
5878 	struct iwx_add_sta_cmd cmd;
5879 	int err;
5880 	uint32_t status;
5881 
5882 	memset(&cmd, 0, sizeof(cmd));
5883 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5884 	    in->in_color));
5885 	cmd.sta_id = IWX_STATION_ID;
5886 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5887 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5888 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5889 
5890 	status = IWX_ADD_STA_SUCCESS;
5891 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5892 	    sizeof(cmd), &cmd, &status);
5893 	if (err) {
5894 		printf("%s: could not update sta (error %d)\n",
5895 		    DEVNAME(sc), err);
5896 		return err;
5897 	}
5898 
5899 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5900 	case IWX_ADD_STA_SUCCESS:
5901 		break;
5902 	default:
5903 		err = EIO;
5904 		printf("%s: could not %s draining for station\n",
5905 		    DEVNAME(sc), drain ? "enable" : "disable");
5906 		break;
5907 	}
5908 
5909 	return err;
5910 }
5911 
5912 static int
5913 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5914 {
5915 	int err;
5916 
5917 	IWX_ASSERT_LOCKED(sc);
5918 
5919 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5920 
5921 	err = iwx_drain_sta(sc, in, 1);
5922 	if (err)
5923 		goto done;
5924 
5925 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5926 	if (err) {
5927 		printf("%s: could not flush Tx path (error %d)\n",
5928 		    DEVNAME(sc), err);
5929 		goto done;
5930 	}
5931 
5932 	/*
5933 	 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a no-op in
5934 	 * the FreeBSD driver and has been replaced in OpenBSD.
5935 */ 5936 5937 err = iwx_drain_sta(sc, in, 0); 5938 done: 5939 sc->sc_flags &= ~IWX_FLAG_TXFLUSH; 5940 return err; 5941 } 5942 5943 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25 5944 5945 static int 5946 iwx_beacon_filter_send_cmd(struct iwx_softc *sc, 5947 struct iwx_beacon_filter_cmd *cmd) 5948 { 5949 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD, 5950 0, sizeof(struct iwx_beacon_filter_cmd), cmd); 5951 } 5952 5953 static int 5954 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable) 5955 { 5956 struct iwx_beacon_filter_cmd cmd = { 5957 IWX_BF_CMD_CONFIG_DEFAULTS, 5958 .bf_enable_beacon_filter = htole32(1), 5959 .ba_enable_beacon_abort = htole32(enable), 5960 }; 5961 5962 if (!sc->sc_bf.bf_enabled) 5963 return 0; 5964 5965 sc->sc_bf.ba_enabled = enable; 5966 return iwx_beacon_filter_send_cmd(sc, &cmd); 5967 } 5968 5969 static void 5970 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in, 5971 struct iwx_mac_power_cmd *cmd) 5972 { 5973 struct ieee80211com *ic = &sc->sc_ic; 5974 struct ieee80211_node *ni = &in->in_ni; 5975 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5976 int dtim_period, dtim_msec, keep_alive; 5977 5978 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 5979 in->in_color)); 5980 if (vap->iv_dtim_period) 5981 dtim_period = vap->iv_dtim_period; 5982 else 5983 dtim_period = 1; 5984 5985 /* 5986 * Regardless of power management state the driver must set 5987 * keep alive period. FW will use it for sending keep alive NDPs 5988 * immediately after association. Check that keep alive period 5989 * is at least 3 * DTIM. 5990 */ 5991 dtim_msec = dtim_period * ni->ni_intval; 5992 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC); 5993 keep_alive = roundup(keep_alive, 1000) / 1000; 5994 cmd->keep_alive_seconds = htole16(keep_alive); 5995 5996 if (ic->ic_opmode != IEEE80211_M_MONITOR) 5997 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK); 5998 } 5999 6000 static int 6001 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in) 6002 { 6003 int err; 6004 int ba_enable; 6005 struct iwx_mac_power_cmd cmd; 6006 6007 memset(&cmd, 0, sizeof(cmd)); 6008 6009 iwx_power_build_cmd(sc, in, &cmd); 6010 6011 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0, 6012 sizeof(cmd), &cmd); 6013 if (err != 0) 6014 return err; 6015 6016 ba_enable = !!(cmd.flags & 6017 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)); 6018 return iwx_update_beacon_abort(sc, in, ba_enable); 6019 } 6020 6021 static int 6022 iwx_power_update_device(struct iwx_softc *sc) 6023 { 6024 struct iwx_device_power_cmd cmd = { }; 6025 struct ieee80211com *ic = &sc->sc_ic; 6026 6027 if (ic->ic_opmode != IEEE80211_M_MONITOR) 6028 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); 6029 6030 return iwx_send_cmd_pdu(sc, 6031 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd); 6032 } 6033 #if 0 6034 static int 6035 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in) 6036 { 6037 struct iwx_beacon_filter_cmd cmd = { 6038 IWX_BF_CMD_CONFIG_DEFAULTS, 6039 .bf_enable_beacon_filter = htole32(1), 6040 .ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled), 6041 }; 6042 int err; 6043 6044 err = iwx_beacon_filter_send_cmd(sc, &cmd); 6045 if (err == 0) 6046 sc->sc_bf.bf_enabled = 1; 6047 6048 return err; 6049 } 6050 #endif 6051 static int 6052 iwx_disable_beacon_filter(struct iwx_softc *sc) 6053 { 6054 struct iwx_beacon_filter_cmd cmd; 6055 int err; 6056 6057 memset(&cmd, 0, sizeof(cmd)); 6058 6059 err = 
iwx_beacon_filter_send_cmd(sc, &cmd); 6060 if (err == 0) 6061 sc->sc_bf.bf_enabled = 0; 6062 6063 return err; 6064 } 6065 6066 static int 6067 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update) 6068 { 6069 struct iwx_add_sta_cmd add_sta_cmd; 6070 int err, i; 6071 uint32_t status, aggsize; 6072 const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >> 6073 IWX_STA_FLG_MAX_AGG_SIZE_SHIFT); 6074 struct ieee80211com *ic = &sc->sc_ic; 6075 struct ieee80211_node *ni = &in->in_ni; 6076 struct ieee80211_htrateset *htrs = &ni->ni_htrates; 6077 6078 if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE)) 6079 panic("STA already added"); 6080 6081 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd)); 6082 6083 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6084 add_sta_cmd.sta_id = IWX_MONITOR_STA_ID; 6085 add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE; 6086 } else { 6087 add_sta_cmd.sta_id = IWX_STATION_ID; 6088 add_sta_cmd.station_type = IWX_STA_LINK; 6089 } 6090 add_sta_cmd.mac_id_n_color 6091 = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color)); 6092 if (!update) { 6093 if (ic->ic_opmode == IEEE80211_M_MONITOR) 6094 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, 6095 etheranyaddr); 6096 else 6097 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, 6098 in->in_macaddr); 6099 } 6100 DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__, 6101 ether_sprintf(add_sta_cmd.addr))); 6102 add_sta_cmd.add_modify = update ? 1 : 0; 6103 add_sta_cmd.station_flags_msk 6104 |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK); 6105 6106 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) { 6107 add_sta_cmd.station_flags_msk 6108 |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK | 6109 IWX_STA_FLG_AGG_MPDU_DENS_MSK); 6110 6111 if (iwx_mimo_enabled(sc)) { 6112 if (ni->ni_flags & IEEE80211_NODE_VHT) { 6113 add_sta_cmd.station_flags |= 6114 htole32(IWX_STA_FLG_MIMO_EN_MIMO2); 6115 } else { 6116 int hasmimo = 0; 6117 for (i = 0; i < htrs->rs_nrates; i++) { 6118 if (htrs->rs_rates[i] > 7) { 6119 hasmimo = 1; 6120 break; 6121 } 6122 } 6123 if (hasmimo) { 6124 add_sta_cmd.station_flags |= 6125 htole32(IWX_STA_FLG_MIMO_EN_MIMO2); 6126 } 6127 } 6128 } 6129 6130 if (ni->ni_flags & IEEE80211_NODE_HT && 6131 IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 6132 add_sta_cmd.station_flags |= htole32( 6133 IWX_STA_FLG_FAT_EN_40MHZ); 6134 } 6135 6136 6137 if (ni->ni_flags & IEEE80211_NODE_VHT) { 6138 if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) { 6139 add_sta_cmd.station_flags |= htole32( 6140 IWX_STA_FLG_FAT_EN_80MHZ); 6141 } 6142 // XXX-misha: TODO get real ampdu size 6143 aggsize = max_aggsize; 6144 } else { 6145 aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam), 6146 IEEE80211_HTCAP_MAXRXAMPDU); 6147 } 6148 6149 if (aggsize > max_aggsize) 6150 aggsize = max_aggsize; 6151 add_sta_cmd.station_flags |= htole32((aggsize << 6152 IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) & 6153 IWX_STA_FLG_MAX_AGG_SIZE_MSK); 6154 6155 switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam), 6156 IEEE80211_HTCAP_MPDUDENSITY)) { 6157 case IEEE80211_HTCAP_MPDUDENSITY_2: 6158 add_sta_cmd.station_flags 6159 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US); 6160 break; 6161 case IEEE80211_HTCAP_MPDUDENSITY_4: 6162 add_sta_cmd.station_flags 6163 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US); 6164 break; 6165 case IEEE80211_HTCAP_MPDUDENSITY_8: 6166 add_sta_cmd.station_flags 6167 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US); 6168 break; 6169 case IEEE80211_HTCAP_MPDUDENSITY_16: 6170 add_sta_cmd.station_flags 6171 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US); 6172 break; 6173 default: 6174 break; 6175 } 6176 
} 6177 6178 status = IWX_ADD_STA_SUCCESS; 6179 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd), 6180 &add_sta_cmd, &status); 6181 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) 6182 err = EIO; 6183 6184 return err; 6185 } 6186 6187 static int 6188 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in) 6189 { 6190 struct ieee80211com *ic = &sc->sc_ic; 6191 struct iwx_rm_sta_cmd rm_sta_cmd; 6192 int err; 6193 6194 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0) 6195 panic("sta already removed"); 6196 6197 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 6198 if (ic->ic_opmode == IEEE80211_M_MONITOR) 6199 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID; 6200 else 6201 rm_sta_cmd.sta_id = IWX_STATION_ID; 6202 6203 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd), 6204 &rm_sta_cmd); 6205 6206 return err; 6207 } 6208 6209 static int 6210 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in) 6211 { 6212 int err, i, cmd_ver; 6213 6214 err = iwx_flush_sta(sc, in); 6215 if (err) { 6216 printf("%s: could not flush Tx path (error %d)\n", 6217 DEVNAME(sc), err); 6218 return err; 6219 } 6220 6221 /* 6222 * New SCD_QUEUE_CONFIG API requires explicit queue removal 6223 * before a station gets removed. 6224 */ 6225 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 6226 IWX_SCD_QUEUE_CONFIG_CMD); 6227 if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) { 6228 err = iwx_disable_mgmt_queue(sc); 6229 if (err) 6230 return err; 6231 for (i = IWX_FIRST_AGG_TX_QUEUE; 6232 i < IWX_LAST_AGG_TX_QUEUE; i++) { 6233 struct iwx_tx_ring *ring = &sc->txq[i]; 6234 if ((sc->qenablemsk & (1 << i)) == 0) 6235 continue; 6236 err = iwx_disable_txq(sc, IWX_STATION_ID, 6237 ring->qid, ring->tid); 6238 if (err) { 6239 printf("%s: could not disable Tx queue %d " 6240 "(error %d)\n", DEVNAME(sc), ring->qid, 6241 err); 6242 return err; 6243 } 6244 } 6245 } 6246 6247 err = iwx_rm_sta_cmd(sc, in); 6248 if (err) { 6249 printf("%s: could not remove STA (error %d)\n", 6250 DEVNAME(sc), err); 6251 return err; 6252 } 6253 6254 in->in_flags = 0; 6255 6256 sc->sc_rx_ba_sessions = 0; 6257 sc->ba_rx.start_tidmask = 0; 6258 sc->ba_rx.stop_tidmask = 0; 6259 memset(sc->aggqid, 0, sizeof(sc->aggqid)); 6260 sc->ba_tx.start_tidmask = 0; 6261 sc->ba_tx.stop_tidmask = 0; 6262 for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++) 6263 sc->qenablemsk &= ~(1 << i); 6264 6265 #if 0 6266 for (i = 0; i < IEEE80211_NUM_TID; i++) { 6267 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i]; 6268 if (ba->ba_state != IEEE80211_BA_AGREED) 6269 continue; 6270 ieee80211_delba_request(ic, ni, 0, 1, i); 6271 } 6272 #endif 6273 /* Clear ampdu rx state (GOS-1525) */ 6274 for (i = 0; i < IWX_MAX_TID_COUNT; i++) { 6275 struct iwx_rx_ba *ba = &sc->ni_rx_ba[i]; 6276 ba->ba_flags = 0; 6277 } 6278 6279 return 0; 6280 } 6281 6282 static uint8_t 6283 iwx_umac_scan_fill_channels(struct iwx_softc *sc, 6284 struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems, 6285 int n_ssids, uint32_t channel_cfg_flags) 6286 { 6287 struct ieee80211com *ic = &sc->sc_ic; 6288 struct ieee80211_scan_state *ss = ic->ic_scan; 6289 struct ieee80211_channel *c; 6290 uint8_t nchan; 6291 int j; 6292 6293 for (nchan = j = 0; 6294 j < ss->ss_last && 6295 nchan < sc->sc_capa_n_scan_channels; 6296 j++) { 6297 uint8_t channel_num; 6298 6299 c = ss->ss_chans[j]; 6300 channel_num = ieee80211_mhz2ieee(c->ic_freq, 0); 6301 if (isset(sc->sc_ucode_api, 6302 IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) { 6303 chan->v2.channel_num = channel_num; 6304 if 
(IEEE80211_IS_CHAN_2GHZ(c)) 6305 chan->v2.band = IWX_PHY_BAND_24; 6306 else 6307 chan->v2.band = IWX_PHY_BAND_5; 6308 chan->v2.iter_count = 1; 6309 chan->v2.iter_interval = 0; 6310 } else { 6311 chan->v1.channel_num = channel_num; 6312 chan->v1.iter_count = 1; 6313 chan->v1.iter_interval = htole16(0); 6314 } 6315 chan->flags |= htole32(channel_cfg_flags); 6316 chan++; 6317 nchan++; 6318 } 6319 6320 return nchan; 6321 } 6322 6323 static int 6324 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq) 6325 { 6326 struct ieee80211com *ic = &sc->sc_ic; 6327 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6328 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf; 6329 struct ieee80211_rateset *rs; 6330 size_t remain = sizeof(preq->buf); 6331 uint8_t *frm, *pos; 6332 6333 memset(preq, 0, sizeof(*preq)); 6334 6335 if (remain < sizeof(*wh) + 2) 6336 return ENOBUFS; 6337 6338 /* 6339 * Build a probe request frame. Most of the following code is a 6340 * copy & paste of what is done in net80211. 6341 */ 6342 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6343 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6344 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6345 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 6346 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr); 6347 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 6348 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6349 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6350 6351 frm = (uint8_t *)(wh + 1); 6352 *frm++ = IEEE80211_ELEMID_SSID; 6353 *frm++ = 0; 6354 /* hardware inserts SSID */ 6355 6356 /* Tell the firmware where the MAC header is. */ 6357 preq->mac_header.offset = 0; 6358 preq->mac_header.len = htole16(frm - (uint8_t *)wh); 6359 remain -= frm - (uint8_t *)wh; 6360 6361 /* Fill in 2GHz IEs and tell firmware where they are. */ 6362 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6363 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 6364 if (remain < 4 + rs->rs_nrates) 6365 return ENOBUFS; 6366 } else if (remain < 2 + rs->rs_nrates) 6367 return ENOBUFS; 6368 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh); 6369 pos = frm; 6370 frm = ieee80211_add_rates(frm, rs); 6371 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6372 frm = ieee80211_add_xrates(frm, rs); 6373 remain -= frm - pos; 6374 6375 if (isset(sc->sc_enabled_capa, 6376 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) { 6377 if (remain < 3) 6378 return ENOBUFS; 6379 *frm++ = IEEE80211_ELEMID_DSPARMS; 6380 *frm++ = 1; 6381 *frm++ = 0; 6382 remain -= 3; 6383 } 6384 preq->band_data[0].len = htole16(frm - pos); 6385 6386 if (sc->sc_nvm.sku_cap_band_52GHz_enable) { 6387 /* Fill in 5GHz IEs. */ 6388 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6389 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 6390 if (remain < 4 + rs->rs_nrates) 6391 return ENOBUFS; 6392 } else if (remain < 2 + rs->rs_nrates) 6393 return ENOBUFS; 6394 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh); 6395 pos = frm; 6396 frm = ieee80211_add_rates(frm, rs); 6397 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6398 frm = ieee80211_add_xrates(frm, rs); 6399 preq->band_data[1].len = htole16(frm - pos); 6400 remain -= frm - pos; 6401 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) { 6402 if (remain < 14) 6403 return ENOBUFS; 6404 frm = ieee80211_add_vhtcap(frm, vap->iv_bss); 6405 remain -= frm - pos; 6406 preq->band_data[1].len = htole16(frm - pos); 6407 } 6408 } 6409 6410 /* Send 11n IEs on both 2GHz and 5GHz bands. 
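	 * The common_data block is expected to be appended by the firmware
	 * to the band-specific IEs of each probe request it transmits.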
*/ 6411 preq->common_data.offset = htole16(frm - (uint8_t *)wh); 6412 pos = frm; 6413 if (vap->iv_flags_ht & IEEE80211_FHT_HT) { 6414 if (remain < 28) 6415 return ENOBUFS; 6416 frm = ieee80211_add_htcap(frm, vap->iv_bss); 6417 /* XXX add WME info? */ 6418 remain -= frm - pos; 6419 } 6420 6421 preq->common_data.len = htole16(frm - pos); 6422 6423 return 0; 6424 } 6425 6426 static int 6427 iwx_config_umac_scan_reduced(struct iwx_softc *sc) 6428 { 6429 struct iwx_scan_config scan_cfg; 6430 struct iwx_host_cmd hcmd = { 6431 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0), 6432 .len[0] = sizeof(scan_cfg), 6433 .data[0] = &scan_cfg, 6434 .flags = 0, 6435 }; 6436 int cmdver; 6437 6438 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) { 6439 printf("%s: firmware does not support reduced scan config\n", 6440 DEVNAME(sc)); 6441 return ENOTSUP; 6442 } 6443 6444 memset(&scan_cfg, 0, sizeof(scan_cfg)); 6445 6446 /* 6447 * SCAN_CFG version >= 5 implies that the broadcast 6448 * STA ID field is deprecated. 6449 */ 6450 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD); 6451 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5) 6452 scan_cfg.bcast_sta_id = 0xff; 6453 6454 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc)); 6455 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc)); 6456 6457 return iwx_send_cmd(sc, &hcmd); 6458 } 6459 6460 static uint16_t 6461 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan) 6462 { 6463 struct ieee80211com *ic = &sc->sc_ic; 6464 struct ieee80211_scan_state *ss = ic->ic_scan; 6465 uint16_t flags = 0; 6466 6467 if (ss->ss_nssid == 0) { 6468 DPRINTF(("%s: Passive scan started\n", __func__)); 6469 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE; 6470 } 6471 6472 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL; 6473 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE; 6474 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL; 6475 6476 return flags; 6477 } 6478 6479 #define IWX_SCAN_DWELL_ACTIVE 10 6480 #define IWX_SCAN_DWELL_PASSIVE 110 6481 6482 /* adaptive dwell max budget time [TU] for full scan */ 6483 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 6484 /* adaptive dwell max budget time [TU] for directed scan */ 6485 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 6486 /* adaptive dwell default high band APs number */ 6487 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8 6488 /* adaptive dwell default low band APs number */ 6489 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2 6490 /* adaptive dwell default APs number in social channels (1, 6, 11) */ 6491 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 6492 /* adaptive dwell number of APs override for p2p friendly GO channels */ 6493 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10 6494 /* adaptive dwell number of APs override for social channels */ 6495 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2 6496 6497 static void 6498 iwx_scan_umac_dwell_v10(struct iwx_softc *sc, 6499 struct iwx_scan_general_params_v10 *general_params, int bgscan) 6500 { 6501 uint32_t suspend_time, max_out_time; 6502 uint8_t active_dwell, passive_dwell; 6503 6504 active_dwell = IWX_SCAN_DWELL_ACTIVE; 6505 passive_dwell = IWX_SCAN_DWELL_PASSIVE; 6506 6507 general_params->adwell_default_social_chn = 6508 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; 6509 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS; 6510 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS; 6511 6512 if (bgscan) 6513 general_params->adwell_max_budget = 6514 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); 6515 
else 6516 general_params->adwell_max_budget = 6517 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); 6518 6519 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6); 6520 if (bgscan) { 6521 max_out_time = htole32(120); 6522 suspend_time = htole32(120); 6523 } else { 6524 max_out_time = htole32(0); 6525 suspend_time = htole32(0); 6526 } 6527 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] = 6528 htole32(max_out_time); 6529 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] = 6530 htole32(suspend_time); 6531 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] = 6532 htole32(max_out_time); 6533 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] = 6534 htole32(suspend_time); 6535 6536 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell; 6537 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell; 6538 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell; 6539 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell; 6540 } 6541 6542 static void 6543 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc, 6544 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan) 6545 { 6546 iwx_scan_umac_dwell_v10(sc, gp, bgscan); 6547 6548 gp->flags = htole16(gen_flags); 6549 6550 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1) 6551 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3; 6552 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) 6553 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3; 6554 6555 gp->scan_start_mac_id = 0; 6556 } 6557 6558 static void 6559 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc, 6560 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags, 6561 int n_ssid) 6562 { 6563 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER; 6564 6565 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config, 6566 nitems(cp->channel_config), n_ssid, channel_cfg_flags); 6567 6568 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY; 6569 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS; 6570 } 6571 6572 static int 6573 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan) 6574 { 6575 struct ieee80211com *ic = &sc->sc_ic; 6576 struct ieee80211_scan_state *ss = ic->ic_scan; 6577 struct iwx_host_cmd hcmd = { 6578 .id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0), 6579 .len = { 0, }, 6580 .data = { NULL, }, 6581 .flags = 0, 6582 }; 6583 struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd; 6584 struct iwx_scan_req_params_v14 *scan_p; 6585 int err, async = bgscan, n_ssid = 0; 6586 uint16_t gen_flags; 6587 uint32_t bitmap_ssid = 0; 6588 6589 IWX_ASSERT_LOCKED(sc); 6590 6591 bzero(cmd, sizeof(struct iwx_scan_req_umac_v14)); 6592 6593 scan_p = &cmd->scan_params; 6594 6595 cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6); 6596 cmd->uid = htole32(0); 6597 6598 gen_flags = iwx_scan_umac_flags_v2(sc, bgscan); 6599 iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params, 6600 gen_flags, bgscan); 6601 6602 scan_p->periodic_params.schedule[0].interval = htole16(0); 6603 scan_p->periodic_params.schedule[0].iter_count = 1; 6604 6605 err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq); 6606 if (err) { 6607 printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__, 6608 err); 6609 return err; 6610 } 6611 6612 for (int i=0; i < ss->ss_nssid; i++) { 6613 scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID; 6614 scan_p->probe_params.direct_scan[i].len = 6615 MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 6616 DPRINTF(("%s: Active scan started for ssid ", 
__func__)); 6617 memcpy(scan_p->probe_params.direct_scan[i].ssid, 6618 ss->ss_ssid[i].ssid, ss->ss_ssid[i].len); 6619 n_ssid++; 6620 bitmap_ssid |= (1 << i); 6621 } 6622 DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid)); 6623 6624 iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid, 6625 n_ssid); 6626 6627 hcmd.len[0] = sizeof(*cmd); 6628 hcmd.data[0] = (void *)cmd; 6629 hcmd.flags |= async ? IWX_CMD_ASYNC : 0; 6630 6631 err = iwx_send_cmd(sc, &hcmd); 6632 return err; 6633 } 6634 6635 static void 6636 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif) 6637 { 6638 char alpha2[3]; 6639 6640 snprintf(alpha2, sizeof(alpha2), "%c%c", 6641 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff); 6642 6643 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' " 6644 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc)); 6645 6646 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */ 6647 } 6648 6649 uint8_t 6650 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx) 6651 { 6652 int i; 6653 uint8_t rval; 6654 6655 for (i = 0; i < rs->rs_nrates; i++) { 6656 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 6657 if (rval == iwx_rates[ridx].rate) 6658 return rs->rs_rates[i]; 6659 } 6660 6661 return 0; 6662 } 6663 6664 static int 6665 iwx_rval2ridx(int rval) 6666 { 6667 int ridx; 6668 6669 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) { 6670 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP) 6671 continue; 6672 if (rval == iwx_rates[ridx].rate) 6673 break; 6674 } 6675 6676 return ridx; 6677 } 6678 6679 static void 6680 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates, 6681 int *ofdm_rates) 6682 { 6683 struct ieee80211_node *ni = &in->in_ni; 6684 struct ieee80211_rateset *rs = &ni->ni_rates; 6685 int lowest_present_ofdm = -1; 6686 int lowest_present_cck = -1; 6687 uint8_t cck = 0; 6688 uint8_t ofdm = 0; 6689 int i; 6690 6691 if (ni->ni_chan == IEEE80211_CHAN_ANYC || 6692 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) { 6693 for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) { 6694 if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0) 6695 continue; 6696 cck |= (1 << i); 6697 if (lowest_present_cck == -1 || lowest_present_cck > i) 6698 lowest_present_cck = i; 6699 } 6700 } 6701 for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) { 6702 if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0) 6703 continue; 6704 ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE)); 6705 if (lowest_present_ofdm == -1 || lowest_present_ofdm > i) 6706 lowest_present_ofdm = i; 6707 } 6708 6709 /* 6710 * Now we've got the basic rates as bitmaps in the ofdm and cck 6711 * variables. This isn't sufficient though, as there might not 6712 * be all the right rates in the bitmap. E.g. if the only basic 6713 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps 6714 * and 6 Mbps because the 802.11-2007 standard says in 9.6: 6715 * 6716 * [...] a STA responding to a received frame shall transmit 6717 * its Control Response frame [...] at the highest rate in the 6718 * BSSBasicRateSet parameter that is less than or equal to the 6719 * rate of the immediately previous frame in the frame exchange 6720 * sequence ([...]) and that is of the same modulation class 6721 * ([...]) as the received frame. 
If no rate contained in the 6722 * BSSBasicRateSet parameter meets these conditions, then the 6723 * control frame sent in response to a received frame shall be 6724 * transmitted at the highest mandatory rate of the PHY that is 6725 * less than or equal to the rate of the received frame, and 6726 * that is of the same modulation class as the received frame. 6727 * 6728 * As a consequence, we need to add all mandatory rates that are 6729 * lower than all of the basic rates to these bitmaps. 6730 */ 6731 6732 if (IWX_RATE_24M_INDEX < lowest_present_ofdm) 6733 ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE; 6734 if (IWX_RATE_12M_INDEX < lowest_present_ofdm) 6735 ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE; 6736 /* 6M already there or needed so always add */ 6737 ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE; 6738 6739 /* 6740 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. 6741 * Note, however: 6742 * - if no CCK rates are basic, it must be ERP since there must 6743 * be some basic rates at all, so they're OFDM => ERP PHY 6744 * (or we're in 5 GHz, and the cck bitmap will never be used) 6745 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M 6746 * - if 5.5M is basic, 1M and 2M are mandatory 6747 * - if 2M is basic, 1M is mandatory 6748 * - if 1M is basic, that's the only valid ACK rate. 6749 * As a consequence, it's not as complicated as it sounds, just add 6750 * any lower rates to the ACK rate bitmap. 6751 */ 6752 if (IWX_RATE_11M_INDEX < lowest_present_cck) 6753 cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE; 6754 if (IWX_RATE_5M_INDEX < lowest_present_cck) 6755 cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE; 6756 if (IWX_RATE_2M_INDEX < lowest_present_cck) 6757 cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE; 6758 /* 1M already there or needed so always add */ 6759 cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE; 6760 6761 *cck_rates = cck; 6762 *ofdm_rates = ofdm; 6763 } 6764 6765 static void 6766 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in, 6767 struct iwx_mac_ctx_cmd *cmd, uint32_t action) 6768 { 6769 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 6770 struct ieee80211com *ic = &sc->sc_ic; 6771 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6772 struct ieee80211_node *ni = vap->iv_bss; 6773 int cck_ack_rates, ofdm_ack_rates; 6774 6775 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 6776 in->in_color)); 6777 cmd->action = htole32(action); 6778 6779 if (action == IWX_FW_CTXT_ACTION_REMOVE) 6780 return; 6781 6782 if (ic->ic_opmode == IEEE80211_M_MONITOR) 6783 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER); 6784 else if (ic->ic_opmode == IEEE80211_M_STA) 6785 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA); 6786 else 6787 panic("unsupported operating mode %d", ic->ic_opmode); 6788 cmd->tsf_id = htole32(IWX_TSF_ID_A); 6789 6790 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr); 6791 DPRINTF(("%s: cmd->node_addr=%s\n", __func__, 6792 ether_sprintf(cmd->node_addr))); 6793 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6794 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr); 6795 return; 6796 } 6797 6798 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr); 6799 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__, 6800 ether_sprintf(cmd->bssid_addr))); 6801 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates); 6802 cmd->cck_rates = htole32(cck_ack_rates); 6803 cmd->ofdm_rates = htole32(ofdm_ack_rates); 6804 6805 cmd->cck_short_preamble 6806 = htole32((ic->ic_flags & 
IEEE80211_F_SHPREAMBLE)
6807 	    ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6808 	cmd->short_slot
6809 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6810 	    ? IWX_MAC_FLG_SHORT_SLOT : 0);
6811 
6812 	struct chanAccParams chp;
6813 	ieee80211_wme_vap_getparams(vap, &chp);
6814 
6815 	for (int i = 0; i < WME_NUM_AC; i++) {
6816 		int txf = iwx_ac_to_tx_fifo[i];
6817 		cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6818 		cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6819 		cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6820 		cmd->ac[txf].fifos_mask = (1 << txf);
6821 		/* net80211 stores the TXOP limit in units of 32
6822 		 * microseconds; the firmware expects microseconds. */
6823 		cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6824 	}
6825 
6826 	if (ni->ni_flags & IEEE80211_NODE_QOS) {
6827 		DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6828 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6829 	}
6830 
6831 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6832 		switch (vap->iv_curhtprotmode) {
6833 		case IEEE80211_HTINFO_OPMODE_PURE:
6834 			break;
6835 		case IEEE80211_HTINFO_OPMODE_PROTOPT:
6836 		case IEEE80211_HTINFO_OPMODE_MIXED:
6837 			cmd->protection_flags |=
6838 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6839 			    IWX_MAC_PROT_FLG_FAT_PROT);
6840 			break;
6841 		case IEEE80211_HTINFO_OPMODE_HT20PR:
6842 			if (in->in_phyctxt &&
6843 			    (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6844 			    in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6845 				cmd->protection_flags |=
6846 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6847 				    IWX_MAC_PROT_FLG_FAT_PROT);
6848 			}
6849 			break;
6850 		default:
6851 			break;
6852 		}
6853 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6854 		DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6855 	}
6856 
6857 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6858 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6859 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6860 #undef IWX_EXP2
6861 }
6862 
6863 static void
6864 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6865     struct iwx_mac_data_sta *sta, int assoc)
6866 {
6867 	struct ieee80211_node *ni = &in->in_ni;
6868 	struct ieee80211com *ic = &sc->sc_ic;
6869 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6870 	uint32_t dtim_off;
6871 	uint64_t tsf;
6872 	int dtim_period;
6873 
6874 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6875 	tsf = le64toh(ni->ni_tstamp.tsf);
6876 	dtim_period = vap->iv_dtim_period;
6877 
6878 	sta->is_assoc = htole32(assoc);
6879 
6880 	if (assoc) {
6881 		sta->dtim_time = htole32(tsf + dtim_off);
6882 		sta->dtim_tsf = htole64(tsf + dtim_off);
6883 		// XXX: unset in iwm
6884 		sta->assoc_beacon_arrive_time = 0;
6885 	}
6886 	sta->bi = htole32(ni->ni_intval);
6887 	sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6888 	sta->data_policy = htole32(0);
6889 	sta->listen_interval = htole32(10);
6890 	sta->assoc_id = htole32(ni->ni_associd);
6891 }
6892 
6893 static int
6894 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6895     int assoc)
6896 {
6897 	struct ieee80211com *ic = &sc->sc_ic;
6898 	struct ieee80211_node *ni = &in->in_ni;
6899 	struct iwx_mac_ctx_cmd cmd;
6900 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6901 
6902 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6903 		panic("MAC already added");
6904 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6905 		panic("MAC already removed");
6906 
6907 	memset(&cmd, 0, sizeof(cmd));
6908 
6909 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6910 
6911 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
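		/*
		 * iwx_mac_ctxt_cmd_common() returns early for REMOVE, so only
		 * id_and_color and the action field are meaningful here.
		 */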
6912 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, 6913 sizeof(cmd), &cmd); 6914 } 6915 6916 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6917 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC | 6918 IWX_MAC_FILTER_IN_CONTROL_AND_MGMT | 6919 IWX_MAC_FILTER_ACCEPT_GRP | 6920 IWX_MAC_FILTER_IN_BEACON | 6921 IWX_MAC_FILTER_IN_PROBE_REQUEST | 6922 IWX_MAC_FILTER_IN_CRC32); 6923 // XXX: dtim period is in vap 6924 } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) { 6925 /* 6926 * Allow beacons to pass through as long as we are not 6927 * associated or we do not have dtim period information. 6928 */ 6929 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON); 6930 } 6931 iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc); 6932 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd); 6933 } 6934 6935 static int 6936 iwx_clear_statistics(struct iwx_softc *sc) 6937 { 6938 struct iwx_statistics_cmd scmd = { 6939 .flags = htole32(IWX_STATISTICS_FLG_CLEAR) 6940 }; 6941 struct iwx_host_cmd cmd = { 6942 .id = IWX_STATISTICS_CMD, 6943 .len[0] = sizeof(scmd), 6944 .data[0] = &scmd, 6945 .flags = IWX_CMD_WANT_RESP, 6946 .resp_pkt_len = sizeof(struct iwx_notif_statistics), 6947 }; 6948 int err; 6949 6950 err = iwx_send_cmd(sc, &cmd); 6951 if (err) 6952 return err; 6953 6954 iwx_free_resp(sc, &cmd); 6955 return 0; 6956 } 6957 6958 static int 6959 iwx_scan(struct iwx_softc *sc) 6960 { 6961 int err; 6962 err = iwx_umac_scan_v14(sc, 0); 6963 6964 if (err) { 6965 printf("%s: could not initiate scan\n", DEVNAME(sc)); 6966 return err; 6967 } 6968 return 0; 6969 } 6970 6971 static int 6972 iwx_bgscan(struct ieee80211com *ic) 6973 { 6974 struct iwx_softc *sc = ic->ic_softc; 6975 int err; 6976 6977 err = iwx_umac_scan_v14(sc, 1); 6978 if (err) { 6979 printf("%s: could not initiate scan\n", DEVNAME(sc)); 6980 return err; 6981 } 6982 return 0; 6983 } 6984 6985 static int 6986 iwx_enable_mgmt_queue(struct iwx_softc *sc) 6987 { 6988 int err; 6989 6990 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1; 6991 6992 /* 6993 * Non-QoS frames use the "MGMT" TID and queue. 6994 * Other TIDs and data queues are reserved for QoS data frames. 6995 */ 6996 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid, 6997 IWX_MGMT_TID, IWX_TX_RING_COUNT); 6998 if (err) { 6999 printf("%s: could not enable Tx queue %d (error %d)\n", 7000 DEVNAME(sc), sc->first_data_qid, err); 7001 return err; 7002 } 7003 7004 return 0; 7005 } 7006 7007 static int 7008 iwx_disable_mgmt_queue(struct iwx_softc *sc) 7009 { 7010 int err, cmd_ver; 7011 7012 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */ 7013 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 7014 IWX_SCD_QUEUE_CONFIG_CMD); 7015 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) 7016 return 0; 7017 7018 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1; 7019 7020 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid, 7021 IWX_MGMT_TID); 7022 if (err) { 7023 printf("%s: could not disable Tx queue %d (error %d)\n", 7024 DEVNAME(sc), sc->first_data_qid, err); 7025 return err; 7026 } 7027 7028 return 0; 7029 } 7030 7031 static int 7032 iwx_rs_rval2idx(uint8_t rval) 7033 { 7034 /* Firmware expects indices which match our 11g rate set. 
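	 * That set is 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48 and 54 Mbit/s;
	 * e.g. 11 Mbit/s maps to index 3 and 54 Mbit/s to index 11.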
 */
7035 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7036 	int i;
7037 
7038 	for (i = 0; i < rs->rs_nrates; i++) {
7039 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7040 			return i;
7041 	}
7042 
7043 	return -1;
7044 }
7045 
7046 static uint16_t
7047 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7048 {
7049 	uint16_t htrates = 0;
7050 	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7051 	int i;
7052 
7053 	if (rsidx == IEEE80211_HT_RATESET_SISO) {
7054 		for (i = 0; i < htrs->rs_nrates; i++) {
7055 			if (htrs->rs_rates[i] <= 7)
7056 				htrates |= (1 << htrs->rs_rates[i]);
7057 		}
7058 	} else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7059 		for (i = 0; i < htrs->rs_nrates; i++) {
7060 			if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7061 				htrates |= (1 << (htrs->rs_rates[i] - 8));
7062 		}
7063 	} else
7064 		panic("iwx_rs_ht_rates");
7065 
7066 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7067 	    "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7068 
7069 	return htrates;
7070 }
7071 
7072 uint16_t
7073 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7074 {
7075 	uint16_t rx_mcs;
7076 	int max_mcs = -1;
7077 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n)	(0x3 << (2*((n)-1)))
7078 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)	(2*((n)-1))
7079 	rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7080 	    IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7081 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7082 
7083 	switch (rx_mcs) {
7084 	case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7085 		break;
7086 	case IEEE80211_VHT_MCS_SUPPORT_0_7:
7087 		max_mcs = 7;
7088 		break;
7089 	case IEEE80211_VHT_MCS_SUPPORT_0_8:
7090 		max_mcs = 8;
7091 		break;
7092 	case IEEE80211_VHT_MCS_SUPPORT_0_9:
7093 		/* Disable VHT MCS 9 for 20MHz-only stations. */
7094 		if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7095 			max_mcs = 8;
7096 		else
7097 			max_mcs = 9;
7098 		break;
7099 	default:
7100 		/* Should not happen; values above cover the possible range.
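		 * The per-stream field in the VHT MCS map is only two bits
		 * wide; e.g. SUPPORT_0_9 yields max_mcs = 9 and a returned
		 * mask of (1 << 10) - 1 = 0x3ff, covering MCS 0-9.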
*/ 7101 panic("invalid VHT Rx MCS value %u", rx_mcs); 7102 } 7103 7104 return ((1 << (max_mcs + 1)) - 1); 7105 } 7106 7107 static int 7108 iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in) 7109 { 7110 #if 1 7111 panic("iwx: Trying to init rate set on untested version"); 7112 #else 7113 struct ieee80211_node *ni = &in->in_ni; 7114 struct ieee80211_rateset *rs = &ni->ni_rates; 7115 struct iwx_tlc_config_cmd_v3 cfg_cmd; 7116 uint32_t cmd_id; 7117 int i; 7118 size_t cmd_size = sizeof(cfg_cmd); 7119 7120 memset(&cfg_cmd, 0, sizeof(cfg_cmd)); 7121 7122 for (i = 0; i < rs->rs_nrates; i++) { 7123 uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL; 7124 int idx = iwx_rs_rval2idx(rval); 7125 if (idx == -1) 7126 return EINVAL; 7127 cfg_cmd.non_ht_rates |= (1 << idx); 7128 } 7129 7130 if (ni->ni_flags & IEEE80211_NODE_VHT) { 7131 cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT; 7132 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] = 7133 htole16(iwx_rs_vht_rates(sc, ni, 1)); 7134 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] = 7135 htole16(iwx_rs_vht_rates(sc, ni, 2)); 7136 } else if (ni->ni_flags & IEEE80211_NODE_HT) { 7137 cfg_cmd.mode = IWX_TLC_MNG_MODE_HT; 7138 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] = 7139 htole16(iwx_rs_ht_rates(sc, ni, 7140 IEEE80211_HT_RATESET_SISO)); 7141 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] = 7142 htole16(iwx_rs_ht_rates(sc, ni, 7143 IEEE80211_HT_RATESET_MIMO2)); 7144 } else 7145 cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT; 7146 7147 cfg_cmd.sta_id = IWX_STATION_ID; 7148 if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) 7149 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ; 7150 else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA || 7151 in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB) 7152 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ; 7153 else 7154 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ; 7155 cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK; 7156 if (ni->ni_flags & IEEE80211_NODE_VHT) 7157 cfg_cmd.max_mpdu_len = htole16(3895); 7158 else 7159 cfg_cmd.max_mpdu_len = htole16(3839); 7160 if (ni->ni_flags & IEEE80211_NODE_HT) { 7161 if (ieee80211_node_supports_ht_sgi20(ni)) { 7162 cfg_cmd.sgi_ch_width_supp |= (1 << 7163 IWX_TLC_MNG_CH_WIDTH_20MHZ); 7164 } 7165 if (ieee80211_node_supports_ht_sgi40(ni)) { 7166 cfg_cmd.sgi_ch_width_supp |= (1 << 7167 IWX_TLC_MNG_CH_WIDTH_40MHZ); 7168 } 7169 } 7170 if ((ni->ni_flags & IEEE80211_NODE_VHT) && 7171 ieee80211_node_supports_vht_sgi80(ni)) 7172 cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ); 7173 7174 cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0); 7175 return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd); 7176 #endif 7177 } 7178 7179 static int 7180 iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in) 7181 { 7182 struct ieee80211_node *ni = &in->in_ni; 7183 struct ieee80211_rateset *rs = &ni->ni_rates; 7184 struct ieee80211_htrateset *htrs = &ni->ni_htrates; 7185 struct iwx_tlc_config_cmd_v4 cfg_cmd; 7186 uint32_t cmd_id; 7187 int i; 7188 int sgi80 = 0; 7189 size_t cmd_size = sizeof(cfg_cmd); 7190 7191 memset(&cfg_cmd, 0, sizeof(cfg_cmd)); 7192 7193 for (i = 0; i < rs->rs_nrates; i++) { 7194 uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL; 7195 int idx = iwx_rs_rval2idx(rval); 7196 if (idx == -1) 7197 return EINVAL; 7198 cfg_cmd.non_ht_rates |= (1 << idx); 7199 } 7200 for (i = 0; i < htrs->rs_nrates; i++) { 7201 DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i])); 7202 } 
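
	/*
	 * Select the TLC mode in order of capability: VHT, then HT, then
	 * legacy. ht_rates[] is indexed by spatial stream count and holds
	 * a per-bandwidth MCS bitmask.
	 */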
7203 7204 if (ni->ni_flags & IEEE80211_NODE_VHT) { 7205 cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT; 7206 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] = 7207 htole16(iwx_rs_vht_rates(sc, ni, 1)); 7208 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] = 7209 htole16(iwx_rs_vht_rates(sc, ni, 2)); 7210 7211 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n", 7212 __func__, __LINE__, 7213 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]); 7214 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n", 7215 __func__, __LINE__, 7216 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]); 7217 } else if (ni->ni_flags & IEEE80211_NODE_HT) { 7218 cfg_cmd.mode = IWX_TLC_MNG_MODE_HT; 7219 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] = 7220 htole16(iwx_rs_ht_rates(sc, ni, 7221 IEEE80211_HT_RATESET_SISO)); 7222 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] = 7223 htole16(iwx_rs_ht_rates(sc, ni, 7224 IEEE80211_HT_RATESET_MIMO2)); 7225 7226 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n", 7227 __func__, __LINE__, 7228 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]); 7229 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n", 7230 __func__, __LINE__, 7231 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]); 7232 } else 7233 cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT; 7234 7235 cfg_cmd.sta_id = IWX_STATION_ID; 7236 #if 0 7237 if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) 7238 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ; 7239 else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA || 7240 in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB) 7241 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ; 7242 else 7243 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ; 7244 #endif 7245 if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) { 7246 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ; 7247 } else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) { 7248 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ; 7249 } else { 7250 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ; 7251 } 7252 7253 cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK; 7254 if (ni->ni_flags & IEEE80211_NODE_VHT) 7255 cfg_cmd.max_mpdu_len = htole16(3895); 7256 else 7257 cfg_cmd.max_mpdu_len = htole16(3839); 7258 if (ni->ni_flags & IEEE80211_NODE_HT) { 7259 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { 7260 cfg_cmd.sgi_ch_width_supp |= (1 << 7261 IWX_TLC_MNG_CH_WIDTH_20MHZ); 7262 } 7263 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) { 7264 cfg_cmd.sgi_ch_width_supp |= (1 << 7265 IWX_TLC_MNG_CH_WIDTH_40MHZ); 7266 } 7267 } 7268 sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap, 7269 IEEE80211_VHTCAP_SHORT_GI_80); 7270 if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) { 7271 cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ); 7272 } 7273 7274 cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0); 7275 return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd); 7276 } 7277 7278 static int 7279 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in) 7280 { 7281 int cmd_ver; 7282 7283 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 7284 IWX_TLC_MNG_CONFIG_CMD); 7285 if (cmd_ver == 4) 7286 return iwx_rs_init_v4(sc, in); 7287 else 7288 return iwx_rs_init_v3(sc, in); 7289 } 7290 7291 static void 7292 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif) 7293 { 7294 struct ieee80211com *ic = &sc->sc_ic; 7295 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7296 struct ieee80211_node *ni = (void *)vap->iv_bss; 
7297 7298 struct ieee80211_rateset *rs = &ni->ni_rates; 7299 uint32_t rate_n_flags; 7300 uint8_t plcp, rval; 7301 int i, cmd_ver, rate_n_flags_ver2 = 0; 7302 7303 if (notif->sta_id != IWX_STATION_ID || 7304 (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0) 7305 return; 7306 7307 rate_n_flags = le32toh(notif->rate); 7308 7309 if (sc->sc_debug & IWX_DEBUG_TXRATE) 7310 print_ratenflags(__func__, __LINE__, 7311 rate_n_flags, sc->sc_rate_n_flags_version); 7312 7313 cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP, 7314 IWX_TLC_MNG_UPDATE_NOTIF); 7315 if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3) 7316 rate_n_flags_ver2 = 1; 7317 7318 if (rate_n_flags_ver2) { 7319 uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK); 7320 if (mod_type == IWX_RATE_MCS_HT_MSK) { 7321 7322 ieee80211_node_set_txrate_dot11rate(ni, 7323 IWX_RATE_HT_MCS_INDEX(rate_n_flags) | 7324 IEEE80211_RATE_MCS); 7325 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, 7326 "%s:%d new MCS: %d rate_n_flags: %x\n", 7327 __func__, __LINE__, 7328 ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS, 7329 rate_n_flags); 7330 return; 7331 } 7332 } else { 7333 if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) { 7334 ieee80211_node_set_txrate_dot11rate(ni, 7335 rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 | 7336 IWX_RATE_HT_MCS_NSS_MSK_V1)); 7337 7338 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, 7339 "%s:%d new MCS idx: %d rate_n_flags: %x\n", 7340 __func__, __LINE__, 7341 ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags); 7342 return; 7343 } 7344 } 7345 7346 if (rate_n_flags_ver2) { 7347 const struct ieee80211_rateset *rs; 7348 uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK); 7349 if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) 7350 rs = &ieee80211_std_rateset_11a; 7351 else 7352 rs = &ieee80211_std_rateset_11b; 7353 if (ridx < rs->rs_nrates) 7354 rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL); 7355 else 7356 rval = 0; 7357 } else { 7358 plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1); 7359 7360 rval = 0; 7361 for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) { 7362 if (iwx_rates[i].plcp == plcp) { 7363 rval = iwx_rates[i].rate; 7364 break; 7365 } 7366 } 7367 } 7368 7369 if (rval) { 7370 uint8_t rv; 7371 for (i = 0; i < rs->rs_nrates; i++) { 7372 rv = rs->rs_rates[i] & IEEE80211_RATE_VAL; 7373 if (rv == rval) { 7374 ieee80211_node_set_txrate_dot11rate(ni, i); 7375 break; 7376 } 7377 } 7378 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, 7379 "%s:%d new rate %d\n", __func__, __LINE__, 7380 ieee80211_node_get_txrate_dot11rate(ni)); 7381 } 7382 } 7383 7384 static int 7385 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt, 7386 uint8_t chains_static, uint8_t chains_dynamic) 7387 { 7388 struct iwx_rlc_config_cmd cmd; 7389 uint32_t cmd_id; 7390 uint8_t active_cnt, idle_cnt; 7391 7392 memset(&cmd, 0, sizeof(cmd)); 7393 7394 idle_cnt = chains_static; 7395 active_cnt = chains_dynamic; 7396 7397 cmd.phy_id = htole32(phyctxt->id); 7398 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) << 7399 IWX_PHY_RX_CHAIN_VALID_POS); 7400 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS); 7401 cmd.rlc.rx_chain_info |= htole32(active_cnt << 7402 IWX_PHY_RX_CHAIN_MIMO_CNT_POS); 7403 7404 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2); 7405 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd); 7406 } 7407 7408 static int 7409 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt, 7410 struct ieee80211_channel *chan, uint8_t chains_static, 7411 uint8_t 
chains_dynamic, uint32_t apply_time, uint8_t sco,
7412 uint8_t vht_chan_width)
7413 {
7414 uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
7415 int err;
7416
7417 if (chan == IEEE80211_CHAN_ANYC) {
7418 printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
7419 DEVNAME(sc));
7420 return EIO;
7421 }
7422
7423 if (isset(sc->sc_enabled_capa,
7424 IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
7425 (phyctxt->channel->ic_flags & band_flags) !=
7426 (chan->ic_flags & band_flags)) {
7427 err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7428 chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
7429 vht_chan_width);
7430 if (err) {
7431 printf("%s: could not remove PHY context "
7432 "(error %d)\n", DEVNAME(sc), err);
7433 return err;
7434 }
7435 phyctxt->channel = chan;
7436 err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7437 chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
7438 vht_chan_width);
7439 if (err) {
7440 printf("%s: could not add PHY context "
7441 "(error %d)\n", DEVNAME(sc), err);
7442 return err;
7443 }
7444 } else {
7445 phyctxt->channel = chan;
7446 err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7447 chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
7448 vht_chan_width);
7449 if (err) {
7450 printf("%s: could not update PHY context (error %d)\n",
7451 DEVNAME(sc), err);
7452 return err;
7453 }
7454 }
7455
7456 phyctxt->sco = sco;
7457 phyctxt->vht_chan_width = vht_chan_width;
7458
7459 DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
7460 phyctxt->channel->ic_ieee));
7461 DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
7462 DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
7463 phyctxt->vht_chan_width));
7464
7465 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7466 IWX_RLC_CONFIG_CMD) == 2)
7467 return iwx_phy_send_rlc(sc, phyctxt,
7468 chains_static, chains_dynamic);
7469
7470 return 0;
7471 }
7472
7473 static int
7474 iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
7475 {
7476 struct ieee80211com *ic = &sc->sc_ic;
7477 struct iwx_node *in;
7478 struct iwx_vap *ivp = IWX_VAP(vap);
7479 struct ieee80211_node *ni;
7480 uint32_t duration;
7481 int generation = sc->sc_generation, err;
7482
7483 IWX_ASSERT_LOCKED(sc);
7484
7485 ni = ieee80211_ref_node(vap->iv_bss);
7486 in = IWX_NODE(ni);
7487
7488 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7489 err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7490 ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7491 IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7492 if (err)
7493 return err;
7494 } else {
7495 err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7496 in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7497 IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7498 if (err)
7499 return err;
7500 }
7501 ivp->phy_ctxt = &sc->sc_phyctxt[0];
7502 IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
7503 DPRINTF(("%s: in->in_macaddr=%s\n", __func__,
7504 ether_sprintf(in->in_macaddr)));
7505
7506 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
7507 if (err) {
7508 printf("%s: could not add MAC context (error %d)\n",
7509 DEVNAME(sc), err);
7510 return err;
7511 }
7512 sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
7513
7514 err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
7515 if (err) {
7516 printf("%s: could not add binding (error %d)\n",
7517 DEVNAME(sc), err);
7518 goto rm_mac_ctxt;
7519 }
7520 sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
7521
7522 err = iwx_add_sta_cmd(sc, in, 0);
7523 if (err) {
7524 printf("%s: could not add sta (error %d)\n",
7525 DEVNAME(sc), err);
7526 goto rm_binding;
7527 }
7528 sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
7529
7530 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7531 err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
7532 IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
7533 IWX_TX_RING_COUNT);
7534 if (err)
7535 goto rm_sta;
7536 return 0;
7537 }
7538
7539 err = iwx_enable_mgmt_queue(sc);
7540 if (err)
7541 goto rm_sta;
7542
7543 err = iwx_clear_statistics(sc);
7544 if (err)
7545 goto rm_mgmt_queue;
7546
7547 /*
7548 * Prevent the FW from wandering off channel during association
7549 * by "protecting" the session with a time event.
7550 */
7551 if (in->in_ni.ni_intval)
7552 duration = in->in_ni.ni_intval * 9;
7553 else
7554 duration = 900;
7555 return iwx_schedule_session_protection(sc, in, duration);
7556
7557 rm_mgmt_queue:
7558 if (generation == sc->sc_generation)
7559 iwx_disable_mgmt_queue(sc);
7560 rm_sta:
7561 if (generation == sc->sc_generation) {
7562 iwx_rm_sta_cmd(sc, in);
7563 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7564 }
7565 rm_binding:
7566 if (generation == sc->sc_generation) {
7567 iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7568 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7569 }
7570 rm_mac_ctxt:
7571 if (generation == sc->sc_generation) {
7572 iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7573 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7574 }
7575 return err;
7576 }
7577
7578 static int
7579 iwx_deauth(struct iwx_softc *sc)
7580 {
7581 struct ieee80211com *ic = &sc->sc_ic;
7582 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7583 struct iwx_node *in = IWX_NODE(vap->iv_bss);
7584 int err;
7585
7586 IWX_ASSERT_LOCKED(sc);
7587
7588 iwx_unprotect_session(sc, in);
7589
7590 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
7591 err = iwx_rm_sta(sc, in);
7592 if (err)
7593 return err;
7594 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7595 }
7596
7597 if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
7598 err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7599 if (err) {
7600 printf("%s: could not remove binding (error %d)\n",
7601 DEVNAME(sc), err);
7602 return err;
7603 }
7604 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7605 }
7606
7607 DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
7608 IWX_FLAG_MAC_ACTIVE));
7609 if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
7610 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7611 if (err) {
7612 printf("%s: could not remove MAC context (error %d)\n",
7613 DEVNAME(sc), err);
7614 return err;
7615 }
7616 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7617 }
7618
7619 /* Move unused PHY context to a default channel. */
7620 // TODO: OpenBSD leaves this enabled, but it gets in the way of AUTH -> AUTH transitions here.
7621 err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7622 &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7623 IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7624 if (err)
7625 return err;
7626
7627 return 0;
7628 }
7629
7630 static int
7631 iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
7632 {
7633 struct ieee80211com *ic = &sc->sc_ic;
7634 struct iwx_node *in = IWX_NODE(vap->iv_bss);
7635 struct ieee80211_node *ni = &in->in_ni;
7636 struct iwx_vap *ivp = IWX_VAP(vap);
7637 int err;
7638
7639 IWX_ASSERT_LOCKED(sc);
7640
7641 if (ni->ni_flags & IEEE80211_NODE_HT) {
7642 uint8_t chains = iwx_mimo_enabled(sc) ?
2 : 1;
7643 uint8_t sco, vht_chan_width;
7644 sco = IEEE80211_HTOP0_SCO_SCN;
7645 if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7646 IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
7647 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
7648 else
7649 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
7650 err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
7651 ivp->phy_ctxt->channel, chains, chains,
7652 0, sco, vht_chan_width);
7653 if (err) {
7654 printf("%s: failed to update PHY\n", DEVNAME(sc));
7655 return err;
7656 }
7657 }
7658
7659 /* Update STA again to apply HT and VHT settings. */
7660 err = iwx_add_sta_cmd(sc, in, 1);
7661 if (err) {
7662 printf("%s: could not update STA (error %d)\n",
7663 DEVNAME(sc), err);
7664 return err;
7665 }
7666
7667 /* We have now been assigned an associd by the AP. */
7668 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
7669 if (err) {
7670 printf("%s: failed to update MAC\n", DEVNAME(sc));
7671 return err;
7672 }
7673
7674 err = iwx_sf_config(sc, IWX_SF_FULL_ON);
7675 if (err) {
7676 printf("%s: could not set sf full on (error %d)\n",
7677 DEVNAME(sc), err);
7678 return err;
7679 }
7680
7681 err = iwx_allow_mcast(sc);
7682 if (err) {
7683 printf("%s: could not allow mcast (error %d)\n",
7684 DEVNAME(sc), err);
7685 return err;
7686 }
7687
7688 err = iwx_power_update_device(sc);
7689 if (err) {
7690 printf("%s: could not send power command (error %d)\n",
7691 DEVNAME(sc), err);
7692 return err;
7693 }
7694 #ifdef notyet
7695 /*
7696 * Disabled for now. Default beacon filter settings
7697 * prevent net80211 from getting ERP and HT protection
7698 * updates from beacons.
7699 */
7700 err = iwx_enable_beacon_filter(sc, in);
7701 if (err) {
7702 printf("%s: could not enable beacon filter\n",
7703 DEVNAME(sc));
7704 return err;
7705 }
7706 #endif
7707 err = iwx_power_mac_update_mode(sc, in);
7708 if (err) {
7709 printf("%s: could not update MAC power (error %d)\n",
7710 DEVNAME(sc), err);
7711 return err;
7712 }
7713
7714 if (ic->ic_opmode == IEEE80211_M_MONITOR)
7715 return 0;
7716
7717 err = iwx_rs_init(sc, in);
7718 if (err) {
7719 printf("%s: could not init rate scaling (error %d)\n",
7720 DEVNAME(sc), err);
7721 return err;
7722 }
7723
7724 return 0;
7725 }
7726
7727 static int
7728 iwx_run_stop(struct iwx_softc *sc)
7729 {
7730 struct ieee80211com *ic = &sc->sc_ic;
7731 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7732 struct iwx_node *in = IWX_NODE(vap->iv_bss);
7733 struct ieee80211_node *ni = &in->in_ni;
7734 int err, i;
7735
7736 IWX_ASSERT_LOCKED(sc);
7737
7738 err = iwx_flush_sta(sc, in);
7739 if (err) {
7740 printf("%s: could not flush Tx path (error %d)\n",
7741 DEVNAME(sc), err);
7742 return err;
7743 }
7744
7745 /*
7746 * Stop Rx BA sessions now. We cannot rely on the BA task
7747 * for this when moving out of RUN state since it runs in a
7748 * separate thread.
7749 * Note that in->in_ni (struct ieee80211_node) already represents
7750 * our new access point in case we are roaming between APs.
7751 * This means we cannot rely on struct ieee80211_node to tell
7752 * us which BA sessions exist.
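 * Instead, the loop below walks the driver's own sc_rxba_data[]
 * array, which mirrors the Rx BA sessions previously established
 * with the firmware.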
7753 */ 7754 // TODO agg 7755 for (i = 0; i < nitems(sc->sc_rxba_data); i++) { 7756 struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i]; 7757 if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID) 7758 continue; 7759 iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0); 7760 } 7761 7762 err = iwx_sf_config(sc, IWX_SF_INIT_OFF); 7763 if (err) 7764 return err; 7765 7766 err = iwx_disable_beacon_filter(sc); 7767 if (err) { 7768 printf("%s: could not disable beacon filter (error %d)\n", 7769 DEVNAME(sc), err); 7770 return err; 7771 } 7772 7773 /* Mark station as disassociated. */ 7774 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0); 7775 if (err) { 7776 printf("%s: failed to update MAC\n", DEVNAME(sc)); 7777 return err; 7778 } 7779 7780 return 0; 7781 } 7782 7783 static struct ieee80211_node * 7784 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 7785 { 7786 return malloc(sizeof (struct iwx_node), M_80211_NODE, 7787 M_NOWAIT | M_ZERO); 7788 } 7789 7790 #if 0 7791 int 7792 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 7793 struct ieee80211_key *k) 7794 { 7795 struct iwx_softc *sc = ic->ic_softc; 7796 struct iwx_node *in = (void *)ni; 7797 struct iwx_setkey_task_arg *a; 7798 int err; 7799 7800 if (k->k_cipher != IEEE80211_CIPHER_CCMP) { 7801 /* Fallback to software crypto for other ciphers. */ 7802 err = ieee80211_set_key(ic, ni, k); 7803 if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP)) 7804 in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY; 7805 return err; 7806 } 7807 7808 if (sc->setkey_nkeys >= nitems(sc->setkey_arg)) 7809 return ENOSPC; 7810 7811 a = &sc->setkey_arg[sc->setkey_cur]; 7812 a->sta_id = IWX_STATION_ID; 7813 a->ni = ni; 7814 a->k = k; 7815 sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg); 7816 sc->setkey_nkeys++; 7817 iwx_add_task(sc, systq, &sc->setkey_task); 7818 return EBUSY; 7819 } 7820 7821 int 7822 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni, 7823 struct ieee80211_key *k) 7824 { 7825 struct ieee80211com *ic = &sc->sc_ic; 7826 struct iwx_node *in = (void *)ni; 7827 struct iwx_add_sta_key_cmd cmd; 7828 uint32_t status; 7829 const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY | 7830 IWX_NODE_FLAG_HAVE_GROUP_KEY); 7831 int err; 7832 7833 /* 7834 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid. 
7835 * Currently we only implement station mode where 'ni' is always 7836 * ic->ic_bss so there is no need to validate arguments beyond this: 7837 */ 7838 KASSERT(ni == ic->ic_bss); 7839 7840 memset(&cmd, 0, sizeof(cmd)); 7841 7842 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM | 7843 IWX_STA_KEY_FLG_WEP_KEY_MAP | 7844 ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) & 7845 IWX_STA_KEY_FLG_KEYID_MSK)); 7846 if (k->k_flags & IEEE80211_KEY_GROUP) { 7847 cmd.common.key_offset = 1; 7848 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST); 7849 } else 7850 cmd.common.key_offset = 0; 7851 7852 memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len)); 7853 cmd.common.sta_id = sta_id; 7854 7855 cmd.transmit_seq_cnt = htole64(k->k_tsc); 7856 7857 status = IWX_ADD_STA_SUCCESS; 7858 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd, 7859 &status); 7860 if (sc->sc_flags & IWX_FLAG_SHUTDOWN) 7861 return ECANCELED; 7862 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) 7863 err = EIO; 7864 if (err) { 7865 IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH, 7866 IEEE80211_REASON_AUTH_LEAVE); 7867 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 7868 return err; 7869 } 7870 7871 if (k->k_flags & IEEE80211_KEY_GROUP) 7872 in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY; 7873 else 7874 in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY; 7875 7876 if ((in->in_flags & want_keymask) == want_keymask) { 7877 DPRINTF(("marking port %s valid\n", 7878 ether_sprintf(ni->ni_macaddr))); 7879 ni->ni_port_valid = 1; 7880 ieee80211_set_link_state(ic, LINK_STATE_UP); 7881 } 7882 7883 return 0; 7884 } 7885 7886 void 7887 iwx_setkey_task(void *arg) 7888 { 7889 struct iwx_softc *sc = arg; 7890 struct iwx_setkey_task_arg *a; 7891 int err = 0, s = splnet(); 7892 7893 while (sc->setkey_nkeys > 0) { 7894 if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN)) 7895 break; 7896 a = &sc->setkey_arg[sc->setkey_tail]; 7897 err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k); 7898 a->sta_id = 0; 7899 a->ni = NULL; 7900 a->k = NULL; 7901 sc->setkey_tail = (sc->setkey_tail + 1) % 7902 nitems(sc->setkey_arg); 7903 sc->setkey_nkeys--; 7904 } 7905 7906 refcnt_rele_wake(&sc->task_refs); 7907 splx(s); 7908 } 7909 7910 void 7911 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 7912 struct ieee80211_key *k) 7913 { 7914 struct iwx_softc *sc = ic->ic_softc; 7915 struct iwx_add_sta_key_cmd cmd; 7916 7917 if (k->k_cipher != IEEE80211_CIPHER_CCMP) { 7918 /* Fallback to software crypto for other ciphers. 
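 * (This mirrors the CCMP check at the top of iwx_set_key() above.)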
*/ 7919 ieee80211_delete_key(ic, ni, k); 7920 return; 7921 } 7922 7923 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0) 7924 return; 7925 7926 memset(&cmd, 0, sizeof(cmd)); 7927 7928 cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID | 7929 IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP | 7930 ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) & 7931 IWX_STA_KEY_FLG_KEYID_MSK)); 7932 memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len)); 7933 if (k->k_flags & IEEE80211_KEY_GROUP) 7934 cmd.common.key_offset = 1; 7935 else 7936 cmd.common.key_offset = 0; 7937 cmd.common.sta_id = IWX_STATION_ID; 7938 7939 iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd); 7940 } 7941 #endif 7942 7943 static int 7944 iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate) 7945 { 7946 struct ieee80211com *ic = vap->iv_ic; 7947 struct iwx_softc *sc = ic->ic_softc; 7948 enum ieee80211_state ostate = vap->iv_state; 7949 int err = 0; 7950 7951 IWX_LOCK(sc); 7952 7953 if (nstate <= ostate || nstate > IEEE80211_S_RUN) { 7954 switch (ostate) { 7955 case IEEE80211_S_RUN: 7956 err = iwx_run_stop(sc); 7957 if (err) 7958 goto out; 7959 /* FALLTHROUGH */ 7960 case IEEE80211_S_ASSOC: 7961 case IEEE80211_S_AUTH: 7962 if (nstate <= IEEE80211_S_AUTH) { 7963 err = iwx_deauth(sc); 7964 if (err) 7965 goto out; 7966 } 7967 /* FALLTHROUGH */ 7968 case IEEE80211_S_SCAN: 7969 case IEEE80211_S_INIT: 7970 default: 7971 break; 7972 } 7973 // 7974 // /* Die now if iwx_stop() was called while we were sleeping. */ 7975 // if (sc->sc_flags & IWX_FLAG_SHUTDOWN) { 7976 // refcnt_rele_wake(&sc->task_refs); 7977 // splx(s); 7978 // return; 7979 // } 7980 } 7981 7982 switch (nstate) { 7983 case IEEE80211_S_INIT: 7984 break; 7985 7986 case IEEE80211_S_SCAN: 7987 break; 7988 7989 case IEEE80211_S_AUTH: 7990 err = iwx_auth(vap, sc); 7991 break; 7992 7993 case IEEE80211_S_ASSOC: 7994 break; 7995 7996 case IEEE80211_S_RUN: 7997 err = iwx_run(vap, sc); 7998 break; 7999 default: 8000 break; 8001 } 8002 8003 out: 8004 IWX_UNLOCK(sc); 8005 8006 return (err); 8007 } 8008 8009 static int 8010 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 8011 { 8012 struct iwx_vap *ivp = IWX_VAP(vap); 8013 struct ieee80211com *ic = vap->iv_ic; 8014 enum ieee80211_state ostate = vap->iv_state; 8015 int err; 8016 8017 /* 8018 * Prevent attempts to transition towards the same state, unless 8019 * we are scanning in which case a SCAN -> SCAN transition 8020 * triggers another scan iteration. And AUTH -> AUTH is needed 8021 * to support band-steering. 
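 * Note: iwx_newstate_sub() may sleep while waiting for firmware
 * responses, which is presumably why the comlock is dropped around
 * the call below.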
8022 */ 8023 if (ostate == nstate && nstate != IEEE80211_S_SCAN && 8024 nstate != IEEE80211_S_AUTH) 8025 return 0; 8026 IEEE80211_UNLOCK(ic); 8027 err = iwx_newstate_sub(vap, nstate); 8028 IEEE80211_LOCK(ic); 8029 if (err == 0) 8030 err = ivp->iv_newstate(vap, nstate, arg); 8031 8032 return (err); 8033 } 8034 8035 static void 8036 iwx_endscan(struct iwx_softc *sc) 8037 { 8038 struct ieee80211com *ic = &sc->sc_ic; 8039 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8040 8041 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0) 8042 return; 8043 8044 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN); 8045 8046 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps)); 8047 wakeup(&vap->iv_state); /* wake up iwx_newstate */ 8048 } 8049 8050 /* 8051 * Aging and idle timeouts for the different possible scenarios 8052 * in default configuration 8053 */ 8054 static const uint32_t 8055 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = { 8056 { 8057 htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF), 8058 htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF) 8059 }, 8060 { 8061 htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF), 8062 htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF) 8063 }, 8064 { 8065 htole32(IWX_SF_MCAST_AGING_TIMER_DEF), 8066 htole32(IWX_SF_MCAST_IDLE_TIMER_DEF) 8067 }, 8068 { 8069 htole32(IWX_SF_BA_AGING_TIMER_DEF), 8070 htole32(IWX_SF_BA_IDLE_TIMER_DEF) 8071 }, 8072 { 8073 htole32(IWX_SF_TX_RE_AGING_TIMER_DEF), 8074 htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF) 8075 }, 8076 }; 8077 8078 /* 8079 * Aging and idle timeouts for the different possible scenarios 8080 * in single BSS MAC configuration. 8081 */ 8082 static const uint32_t 8083 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = { 8084 { 8085 htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER), 8086 htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER) 8087 }, 8088 { 8089 htole32(IWX_SF_AGG_UNICAST_AGING_TIMER), 8090 htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER) 8091 }, 8092 { 8093 htole32(IWX_SF_MCAST_AGING_TIMER), 8094 htole32(IWX_SF_MCAST_IDLE_TIMER) 8095 }, 8096 { 8097 htole32(IWX_SF_BA_AGING_TIMER), 8098 htole32(IWX_SF_BA_IDLE_TIMER) 8099 }, 8100 { 8101 htole32(IWX_SF_TX_RE_AGING_TIMER), 8102 htole32(IWX_SF_TX_RE_IDLE_TIMER) 8103 }, 8104 }; 8105 8106 static void 8107 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd, 8108 struct ieee80211_node *ni) 8109 { 8110 int i, j, watermark; 8111 8112 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN); 8113 8114 /* 8115 * If we are in association flow - check antenna configuration 8116 * capabilities of the AP station, and choose the watermark accordingly. 8117 */ 8118 if (ni) { 8119 if (ni->ni_flags & IEEE80211_NODE_HT) { 8120 struct ieee80211_htrateset *htrs = &ni->ni_htrates; 8121 int hasmimo = 0; 8122 for (i = 0; i < htrs->rs_nrates; i++) { 8123 if (htrs->rs_rates[i] > 7) { 8124 hasmimo = 1; 8125 break; 8126 } 8127 } 8128 if (hasmimo) 8129 watermark = IWX_SF_W_MARK_MIMO2; 8130 else 8131 watermark = IWX_SF_W_MARK_SISO; 8132 } else { 8133 watermark = IWX_SF_W_MARK_LEGACY; 8134 } 8135 /* default watermark value for unassociated mode. 
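 * With no peer to inspect, the most demanding (MIMO2) watermark
 * is assumed.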
*/ 8136 } else { 8137 watermark = IWX_SF_W_MARK_MIMO2; 8138 } 8139 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark); 8140 8141 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) { 8142 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) { 8143 sf_cmd->long_delay_timeouts[i][j] = 8144 htole32(IWX_SF_LONG_DELAY_AGING_TIMER); 8145 } 8146 } 8147 8148 if (ni) { 8149 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout, 8150 sizeof(iwx_sf_full_timeout)); 8151 } else { 8152 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def, 8153 sizeof(iwx_sf_full_timeout_def)); 8154 } 8155 8156 } 8157 8158 static int 8159 iwx_sf_config(struct iwx_softc *sc, int new_state) 8160 { 8161 struct ieee80211com *ic = &sc->sc_ic; 8162 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8163 struct ieee80211_node *ni = vap->iv_bss; 8164 struct iwx_sf_cfg_cmd sf_cmd = { 8165 .state = htole32(new_state), 8166 }; 8167 int err = 0; 8168 8169 switch (new_state) { 8170 case IWX_SF_UNINIT: 8171 case IWX_SF_INIT_OFF: 8172 iwx_fill_sf_command(sc, &sf_cmd, NULL); 8173 break; 8174 case IWX_SF_FULL_ON: 8175 iwx_fill_sf_command(sc, &sf_cmd, ni); 8176 break; 8177 default: 8178 return EINVAL; 8179 } 8180 8181 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC, 8182 sizeof(sf_cmd), &sf_cmd); 8183 return err; 8184 } 8185 8186 static int 8187 iwx_send_bt_init_conf(struct iwx_softc *sc) 8188 { 8189 struct iwx_bt_coex_cmd bt_cmd; 8190 8191 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd)); 8192 8193 bt_cmd.mode = htole32(IWX_BT_COEX_NW); 8194 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED; 8195 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET; 8196 8197 8198 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd), 8199 &bt_cmd); 8200 } 8201 8202 static int 8203 iwx_send_soc_conf(struct iwx_softc *sc) 8204 { 8205 struct iwx_soc_configuration_cmd cmd; 8206 int err; 8207 uint32_t cmd_id, flags = 0; 8208 8209 memset(&cmd, 0, sizeof(cmd)); 8210 8211 /* 8212 * In VER_1 of this command, the discrete value is considered 8213 * an integer; In VER_2, it's a bitmask. Since we have only 2 8214 * values in VER_1, this is backwards-compatible with VER_2, 8215 * as long as we don't set any other flag bits. 
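 * For example, a discrete NIC always ends up with
 * flags == IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE, while an integrated
 * NIC ORs in its LTR delay and, given a new enough scan command
 * version and a low-latency crystal, the LOW_LATENCY flag.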
8216 */
8217 if (!sc->sc_integrated) { /* VER_1 */
8218 flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8219 } else { /* VER_2 */
8220 uint8_t scan_cmd_ver;
8221 if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8222 flags |= (sc->sc_ltr_delay &
8223 IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8224 scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8225 IWX_SCAN_REQ_UMAC);
8226 if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8227 scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8228 flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8229 }
8230 cmd.flags = htole32(flags);
8231
8232 cmd.latency = htole32(sc->sc_xtal_latency);
8233
8234 cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8235 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8236 if (err)
8237 printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8238 return err;
8239 }
8240
8241 static int
8242 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8243 {
8244 struct iwx_mcc_update_cmd mcc_cmd;
8245 struct iwx_host_cmd hcmd = {
8246 .id = IWX_MCC_UPDATE_CMD,
8247 .flags = IWX_CMD_WANT_RESP,
8248 .data = { &mcc_cmd },
8249 };
8250 struct iwx_rx_packet *pkt;
8251 struct iwx_mcc_update_resp *resp;
8252 size_t resp_len;
8253 int err;
8254
8255 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8256 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8257 if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8258 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8259 mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8260 else
8261 mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8262
8263 hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8264 hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8265
8266 err = iwx_send_cmd(sc, &hcmd);
8267 if (err)
8268 return err;
8269
8270 pkt = hcmd.resp_pkt;
8271 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8272 err = EIO;
8273 goto out;
8274 }
8275
8276 resp_len = iwx_rx_packet_payload_len(pkt);
8277 if (resp_len < sizeof(*resp)) {
8278 err = EIO;
8279 goto out;
8280 }
8281
8282 resp = (void *)pkt->data;
8283 if (resp_len != sizeof(*resp) +
8284 resp->n_channels * sizeof(resp->channels[0])) {
8285 err = EIO;
8286 goto out;
8287 }
8288
8289 DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%x n_channels=%u\n",
8290 resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8291
8292 out:
8293 iwx_free_resp(sc, &hcmd);
8294
8295 return err;
8296 }
8297
8298 static int
8299 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8300 {
8301 struct iwx_temp_report_ths_cmd cmd;
8302 int err;
8303
8304 /*
8305 * In order to give responsibility for critical-temperature-kill
8306 * and TX backoff to FW we need to send an empty temperature
8307 * reporting command at init time.
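 * An all-zeroed command body advertises no driver-side thresholds,
 * leaving both duties entirely to the firmware.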
8308 */
8309 memset(&cmd, 0, sizeof(cmd));
8310
8311 err = iwx_send_cmd_pdu(sc,
8312 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8313 0, sizeof(cmd), &cmd);
8314 if (err)
8315 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8316 DEVNAME(sc), err);
8317
8318 return err;
8319 }
8320
8321 static int
8322 iwx_init_hw(struct iwx_softc *sc)
8323 {
8324 struct ieee80211com *ic = &sc->sc_ic;
8325 int err = 0, i;
8326
8327 err = iwx_run_init_mvm_ucode(sc, 0);
8328 if (err)
8329 return err;
8330
8331 if (!iwx_nic_lock(sc))
8332 return EBUSY;
8333
8334 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8335 if (err) {
8336 printf("%s: could not init tx ant config (error %d)\n",
8337 DEVNAME(sc), err);
8338 goto err;
8339 }
8340
8341 if (sc->sc_tx_with_siso_diversity) {
8342 err = iwx_send_phy_cfg_cmd(sc);
8343 if (err) {
8344 printf("%s: could not send phy config (error %d)\n",
8345 DEVNAME(sc), err);
8346 goto err;
8347 }
8348 }
8349
8350 err = iwx_send_bt_init_conf(sc);
8351 if (err) {
8352 printf("%s: could not init bt coex (error %d)\n",
8353 DEVNAME(sc), err);
8354 goto err;
8355 }
8356
8357 err = iwx_send_soc_conf(sc);
8358 if (err) {
8359 printf("%s: iwx_send_soc_conf failed\n", __func__);
8360 goto err;
8361 }
8362
8363 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8364 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8365 err = iwx_send_dqa_cmd(sc);
8366 if (err) {
8367 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8368 "failed (error %d)\n", __func__, err);
8369 goto err;
8370 }
8371 }
8372 // TODO phyctxt
8373 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8374 /*
8375 * The channel used here isn't relevant as it's
8376 * going to be overwritten in the other flows.
8377 * For now use the first channel we have.
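 * (ic_channels[1] is just a placeholder, the same default channel
 * used by iwx_deauth(); the real channel is programmed later via
 * iwx_phy_ctxt_update().)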
8378 */ 8379 sc->sc_phyctxt[i].id = i; 8380 sc->sc_phyctxt[i].channel = &ic->ic_channels[1]; 8381 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1, 8382 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0); 8383 if (err) { 8384 printf("%s: could not add phy context %d (error %d)\n", 8385 DEVNAME(sc), i, err); 8386 goto err; 8387 } 8388 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 8389 IWX_RLC_CONFIG_CMD) == 2) { 8390 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1); 8391 if (err) { 8392 printf("%s: could not configure RLC for PHY " 8393 "%d (error %d)\n", DEVNAME(sc), i, err); 8394 goto err; 8395 } 8396 } 8397 } 8398 8399 err = iwx_config_ltr(sc); 8400 if (err) { 8401 printf("%s: PCIe LTR configuration failed (error %d)\n", 8402 DEVNAME(sc), err); 8403 } 8404 8405 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) { 8406 err = iwx_send_temp_report_ths_cmd(sc); 8407 if (err) { 8408 printf("%s: iwx_send_temp_report_ths_cmd failed\n", 8409 __func__); 8410 goto err; 8411 } 8412 } 8413 8414 err = iwx_power_update_device(sc); 8415 if (err) { 8416 printf("%s: could not send power command (error %d)\n", 8417 DEVNAME(sc), err); 8418 goto err; 8419 } 8420 8421 if (sc->sc_nvm.lar_enabled) { 8422 err = iwx_send_update_mcc_cmd(sc, "ZZ"); 8423 if (err) { 8424 printf("%s: could not init LAR (error %d)\n", 8425 DEVNAME(sc), err); 8426 goto err; 8427 } 8428 } 8429 8430 err = iwx_config_umac_scan_reduced(sc); 8431 if (err) { 8432 printf("%s: could not configure scan (error %d)\n", 8433 DEVNAME(sc), err); 8434 goto err; 8435 } 8436 8437 err = iwx_disable_beacon_filter(sc); 8438 if (err) { 8439 printf("%s: could not disable beacon filter (error %d)\n", 8440 DEVNAME(sc), err); 8441 goto err; 8442 } 8443 8444 err: 8445 iwx_nic_unlock(sc); 8446 return err; 8447 } 8448 8449 /* Allow multicast from our BSSID. 
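 * The command is sent with pass_all set and an empty address list
 * (count == 0), so this effectively accepts all multicast traffic
 * rather than matching specific group addresses.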
*/ 8450 static int 8451 iwx_allow_mcast(struct iwx_softc *sc) 8452 { 8453 struct ieee80211com *ic = &sc->sc_ic; 8454 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8455 struct iwx_node *in = IWX_NODE(vap->iv_bss); 8456 struct iwx_mcast_filter_cmd *cmd; 8457 size_t size; 8458 int err; 8459 8460 size = roundup(sizeof(*cmd), 4); 8461 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 8462 if (cmd == NULL) 8463 return ENOMEM; 8464 cmd->filter_own = 1; 8465 cmd->port_id = 0; 8466 cmd->count = 0; 8467 cmd->pass_all = 1; 8468 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr); 8469 8470 err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD, 8471 0, size, cmd); 8472 free(cmd, M_DEVBUF); 8473 return err; 8474 } 8475 8476 static int 8477 iwx_init(struct iwx_softc *sc) 8478 { 8479 int err, generation; 8480 generation = ++sc->sc_generation; 8481 iwx_preinit(sc); 8482 8483 err = iwx_start_hw(sc); 8484 if (err) { 8485 printf("%s: iwx_start_hw failed\n", __func__); 8486 return err; 8487 } 8488 8489 err = iwx_init_hw(sc); 8490 if (err) { 8491 if (generation == sc->sc_generation) 8492 iwx_stop_device(sc); 8493 printf("%s: iwx_init_hw failed (error %d)\n", __func__, err); 8494 return err; 8495 } 8496 8497 sc->sc_flags |= IWX_FLAG_HW_INITED; 8498 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc); 8499 8500 return 0; 8501 } 8502 8503 static void 8504 iwx_start(struct iwx_softc *sc) 8505 { 8506 struct ieee80211_node *ni; 8507 struct mbuf *m; 8508 8509 while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 8510 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 8511 if (iwx_tx(sc, m, ni) != 0) { 8512 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 8513 continue; 8514 } 8515 } 8516 } 8517 8518 static void 8519 iwx_stop(struct iwx_softc *sc) 8520 { 8521 struct ieee80211com *ic = &sc->sc_ic; 8522 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8523 struct iwx_vap *ivp = IWX_VAP(vap); 8524 8525 iwx_stop_device(sc); 8526 8527 /* Reset soft state. */ 8528 sc->sc_generation++; 8529 ivp->phy_ctxt = NULL; 8530 8531 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN); 8532 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE; 8533 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE; 8534 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE; 8535 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE; 8536 sc->sc_flags &= ~IWX_FLAG_HW_ERR; 8537 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN; 8538 sc->sc_flags &= ~IWX_FLAG_TXFLUSH; 8539 8540 sc->sc_rx_ba_sessions = 0; 8541 sc->ba_rx.start_tidmask = 0; 8542 sc->ba_rx.stop_tidmask = 0; 8543 memset(sc->aggqid, 0, sizeof(sc->aggqid)); 8544 sc->ba_tx.start_tidmask = 0; 8545 sc->ba_tx.stop_tidmask = 0; 8546 } 8547 8548 static void 8549 iwx_watchdog(void *arg) 8550 { 8551 struct iwx_softc *sc = arg; 8552 struct ieee80211com *ic = &sc->sc_ic; 8553 int i; 8554 8555 /* 8556 * We maintain a separate timer for each Tx queue because 8557 * Tx aggregation queues can get "stuck" while other queues 8558 * keep working. The Linux driver uses a similar workaround. 8559 */ 8560 for (i = 0; i < nitems(sc->sc_tx_timer); i++) { 8561 if (sc->sc_tx_timer[i] > 0) { 8562 if (--sc->sc_tx_timer[i] == 0) { 8563 printf("%s: device timeout\n", DEVNAME(sc)); 8564 8565 iwx_nic_error(sc); 8566 iwx_dump_driver_status(sc); 8567 ieee80211_restart_all(ic); 8568 return; 8569 } 8570 } 8571 } 8572 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc); 8573 } 8574 8575 /* 8576 * Note: This structure is read from the device with IO accesses, 8577 * and the reading already does the endian conversion. 
As it is
8578 * read with uint32_t-sized accesses, any members with a different size
8579 * need to be ordered correctly though!
8580 */
8581 struct iwx_error_event_table {
8582 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8583 uint32_t error_id; /* type of error */
8584 uint32_t trm_hw_status0; /* TRM HW status */
8585 uint32_t trm_hw_status1; /* TRM HW status */
8586 uint32_t blink2; /* branch link */
8587 uint32_t ilink1; /* interrupt link */
8588 uint32_t ilink2; /* interrupt link */
8589 uint32_t data1; /* error-specific data */
8590 uint32_t data2; /* error-specific data */
8591 uint32_t data3; /* error-specific data */
8592 uint32_t bcon_time; /* beacon timer */
8593 uint32_t tsf_low; /* network timestamp function timer */
8594 uint32_t tsf_hi; /* network timestamp function timer */
8595 uint32_t gp1; /* GP1 timer register */
8596 uint32_t gp2; /* GP2 timer register */
8597 uint32_t fw_rev_type; /* firmware revision type */
8598 uint32_t major; /* uCode version major */
8599 uint32_t minor; /* uCode version minor */
8600 uint32_t hw_ver; /* HW Silicon version */
8601 uint32_t brd_ver; /* HW board version */
8602 uint32_t log_pc; /* log program counter */
8603 uint32_t frame_ptr; /* frame pointer */
8604 uint32_t stack_ptr; /* stack pointer */
8605 uint32_t hcmd; /* last host command header */
8606 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
8607 * rxtx_flag */
8608 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
8609 * host_flag */
8610 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
8611 * enc_flag */
8612 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
8613 * time_flag */
8614 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
8615 * wico interrupt */
8616 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
8617 uint32_t wait_event; /* wait event() caller address */
8618 uint32_t l2p_control; /* L2pControlField */
8619 uint32_t l2p_duration; /* L2pDurationField */
8620 uint32_t l2p_mhvalid; /* L2pMhValidBits */
8621 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
8622 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
8623 * (LMPM_PMG_SEL) */
8624 uint32_t u_timestamp; /* date and time of the
8625 * compilation */
8626 uint32_t flow_handler; /* FH read/write pointers, RX credit */
8627 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8628
8629 /*
8630 * UMAC error struct - relevant starting from family 8000 chip.
8631 * Note: This structure is read from the device with IO accesses,
8632 * and the reading already does the endian conversion. As it is
8633 * read with u32-sized accesses, any members with a different size
8634 * need to be ordered correctly though!
8635 */ 8636 struct iwx_umac_error_event_table { 8637 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 8638 uint32_t error_id; /* type of error */ 8639 uint32_t blink1; /* branch link */ 8640 uint32_t blink2; /* branch link */ 8641 uint32_t ilink1; /* interrupt link */ 8642 uint32_t ilink2; /* interrupt link */ 8643 uint32_t data1; /* error-specific data */ 8644 uint32_t data2; /* error-specific data */ 8645 uint32_t data3; /* error-specific data */ 8646 uint32_t umac_major; 8647 uint32_t umac_minor; 8648 uint32_t frame_pointer; /* core register 27*/ 8649 uint32_t stack_pointer; /* core register 28 */ 8650 uint32_t cmd_header; /* latest host cmd sent to UMAC */ 8651 uint32_t nic_isr_pref; /* ISR status register */ 8652 } __packed; 8653 8654 #define ERROR_START_OFFSET (1 * sizeof(uint32_t)) 8655 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t)) 8656 8657 static void 8658 iwx_nic_umac_error(struct iwx_softc *sc) 8659 { 8660 struct iwx_umac_error_event_table table; 8661 uint32_t base; 8662 8663 base = sc->sc_uc.uc_umac_error_event_table; 8664 8665 if (base < 0x400000) { 8666 printf("%s: Invalid error log pointer 0x%08x\n", 8667 DEVNAME(sc), base); 8668 return; 8669 } 8670 8671 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 8672 printf("%s: reading errlog failed\n", DEVNAME(sc)); 8673 return; 8674 } 8675 8676 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 8677 printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc)); 8678 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc), 8679 sc->sc_flags, table.valid); 8680 } 8681 8682 printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id, 8683 iwx_desc_lookup(table.error_id)); 8684 printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1); 8685 printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2); 8686 printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1); 8687 printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2); 8688 printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1); 8689 printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2); 8690 printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3); 8691 printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major); 8692 printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor); 8693 printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc), 8694 table.frame_pointer); 8695 printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc), 8696 table.stack_pointer); 8697 printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header); 8698 printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc), 8699 table.nic_isr_pref); 8700 } 8701 8702 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000 8703 static struct { 8704 const char *name; 8705 uint8_t num; 8706 } advanced_lookup[] = { 8707 { "NMI_INTERRUPT_WDG", 0x34 }, 8708 { "SYSASSERT", 0x35 }, 8709 { "UCODE_VERSION_MISMATCH", 0x37 }, 8710 { "BAD_COMMAND", 0x38 }, 8711 { "BAD_COMMAND", 0x39 }, 8712 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 8713 { "FATAL_ERROR", 0x3D }, 8714 { "NMI_TRM_HW_ERR", 0x46 }, 8715 { "NMI_INTERRUPT_TRM", 0x4C }, 8716 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 8717 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 8718 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 8719 { "NMI_INTERRUPT_HOST", 0x66 }, 8720 { "NMI_INTERRUPT_LMAC_FATAL", 0x70 }, 8721 { "NMI_INTERRUPT_UMAC_FATAL", 0x71 }, 8722 { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 }, 8723 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 8724 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 8725 { 
"NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 8726 { "ADVANCED_SYSASSERT", 0 }, 8727 }; 8728 8729 static const char * 8730 iwx_desc_lookup(uint32_t num) 8731 { 8732 int i; 8733 8734 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 8735 if (advanced_lookup[i].num == 8736 (num & ~IWX_FW_SYSASSERT_CPU_MASK)) 8737 return advanced_lookup[i].name; 8738 8739 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 8740 return advanced_lookup[i].name; 8741 } 8742 8743 /* 8744 * Support for dumping the error log seemed like a good idea ... 8745 * but it's mostly hex junk and the only sensible thing is the 8746 * hw/ucode revision (which we know anyway). Since it's here, 8747 * I'll just leave it in, just in case e.g. the Intel guys want to 8748 * help us decipher some "ADVANCED_SYSASSERT" later. 8749 */ 8750 static void 8751 iwx_nic_error(struct iwx_softc *sc) 8752 { 8753 struct iwx_error_event_table table; 8754 uint32_t base; 8755 8756 printf("%s: dumping device error log\n", DEVNAME(sc)); 8757 printf("%s: GOS-3758: 1\n", __func__); 8758 base = sc->sc_uc.uc_lmac_error_event_table[0]; 8759 printf("%s: GOS-3758: 2\n", __func__); 8760 if (base < 0x400000) { 8761 printf("%s: Invalid error log pointer 0x%08x\n", 8762 DEVNAME(sc), base); 8763 return; 8764 } 8765 8766 printf("%s: GOS-3758: 3\n", __func__); 8767 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 8768 printf("%s: reading errlog failed\n", DEVNAME(sc)); 8769 return; 8770 } 8771 8772 printf("%s: GOS-3758: 4\n", __func__); 8773 if (!table.valid) { 8774 printf("%s: errlog not found, skipping\n", DEVNAME(sc)); 8775 return; 8776 } 8777 8778 printf("%s: GOS-3758: 5\n", __func__); 8779 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 8780 printf("%s: Start Error Log Dump:\n", DEVNAME(sc)); 8781 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc), 8782 sc->sc_flags, table.valid); 8783 } 8784 8785 printf("%s: GOS-3758: 6\n", __func__); 8786 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id, 8787 iwx_desc_lookup(table.error_id)); 8788 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc), 8789 table.trm_hw_status0); 8790 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc), 8791 table.trm_hw_status1); 8792 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2); 8793 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1); 8794 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2); 8795 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1); 8796 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2); 8797 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3); 8798 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time); 8799 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low); 8800 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi); 8801 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1); 8802 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2); 8803 printf("%s: %08X | uCode revision type\n", DEVNAME(sc), 8804 table.fw_rev_type); 8805 printf("%s: %08X | uCode version major\n", DEVNAME(sc), 8806 table.major); 8807 printf("%s: %08X | uCode version minor\n", DEVNAME(sc), 8808 table.minor); 8809 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver); 8810 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver); 8811 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd); 8812 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0); 8813 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1); 8814 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2); 
8815 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3); 8816 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4); 8817 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id); 8818 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event); 8819 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control); 8820 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration); 8821 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid); 8822 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match); 8823 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel); 8824 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp); 8825 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler); 8826 8827 if (sc->sc_uc.uc_umac_error_event_table) 8828 iwx_nic_umac_error(sc); 8829 } 8830 8831 static void 8832 iwx_dump_driver_status(struct iwx_softc *sc) 8833 { 8834 struct ieee80211com *ic = &sc->sc_ic; 8835 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8836 enum ieee80211_state state = vap->iv_state; 8837 int i; 8838 8839 printf("driver status:\n"); 8840 for (i = 0; i < nitems(sc->txq); i++) { 8841 struct iwx_tx_ring *ring = &sc->txq[i]; 8842 printf(" tx ring %2d: qid=%-2d cur=%-3d " 8843 "cur_hw=%-3d queued=%-3d\n", 8844 i, ring->qid, ring->cur, ring->cur_hw, 8845 ring->queued); 8846 } 8847 printf(" rx ring: cur=%d\n", sc->rxq.cur); 8848 printf(" 802.11 state %s\n", ieee80211_state_name[state]); 8849 } 8850 8851 #define SYNC_RESP_STRUCT(_var_, _pkt_) \ 8852 do { \ 8853 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \ 8854 _var_ = (void *)((_pkt_)+1); \ 8855 } while (/*CONSTCOND*/0) 8856 8857 static int 8858 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt) 8859 { 8860 int qid, idx, code; 8861 8862 qid = pkt->hdr.qid & ~0x80; 8863 idx = pkt->hdr.idx; 8864 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 8865 8866 return (!(qid == 0 && idx == 0 && code == 0) && 8867 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID)); 8868 } 8869 8870 static void 8871 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml) 8872 { 8873 struct ieee80211com *ic = &sc->sc_ic; 8874 struct iwx_rx_packet *pkt, *nextpkt; 8875 uint32_t offset = 0, nextoff = 0, nmpdu = 0, len; 8876 struct mbuf *m0, *m; 8877 const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr); 8878 int qid, idx, code, handled = 1; 8879 8880 m0 = data->m; 8881 while (m0 && offset + minsz < IWX_RBUF_SIZE) { 8882 pkt = (struct iwx_rx_packet *)(m0->m_data + offset); 8883 qid = pkt->hdr.qid; 8884 idx = pkt->hdr.idx; 8885 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 8886 8887 if (!iwx_rx_pkt_valid(pkt)) 8888 break; 8889 8890 /* 8891 * XXX Intel inside (tm) 8892 * Any commands in the LONG_GROUP could actually be in the 8893 * LEGACY group. Firmware API versions >= 50 reject commands 8894 * in group 0, forcing us to use this hack. 8895 */ 8896 if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) { 8897 struct iwx_tx_ring *ring = &sc->txq[qid]; 8898 struct iwx_tx_data *txdata = &ring->data[idx]; 8899 if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW) 8900 code = iwx_cmd_opcode(code); 8901 } 8902 8903 len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt); 8904 if (len < minsz || len > (IWX_RBUF_SIZE - offset)) 8905 break; 8906 8907 // TODO ??? 8908 if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) { 8909 /* Take mbuf m0 off the RX ring. 
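 * A replacement buffer must be attached to the ring slot first
 * (iwx_rx_addbuf()); if that fails we stop processing and leave
 * m0 owned by the ring.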
*/ 8910 if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) { 8911 break; 8912 } 8913 KASSERT((data->m != m0), ("%s: data->m != m0", __func__)); 8914 } 8915 8916 switch (code) { 8917 case IWX_REPLY_RX_PHY_CMD: 8918 /* XXX-THJ: I've not managed to hit this path in testing */ 8919 iwx_rx_rx_phy_cmd(sc, pkt, data); 8920 break; 8921 8922 case IWX_REPLY_RX_MPDU_CMD: { 8923 size_t maxlen = IWX_RBUF_SIZE - offset - minsz; 8924 nextoff = offset + 8925 roundup(len, IWX_FH_RSCSR_FRAME_ALIGN); 8926 nextpkt = (struct iwx_rx_packet *) 8927 (m0->m_data + nextoff); 8928 /* AX210 devices ship only one packet per Rx buffer. */ 8929 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 || 8930 nextoff + minsz >= IWX_RBUF_SIZE || 8931 !iwx_rx_pkt_valid(nextpkt)) { 8932 /* No need to copy last frame in buffer. */ 8933 if (offset > 0) 8934 m_adj(m0, offset); 8935 iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen); 8936 m0 = NULL; /* stack owns m0 now; abort loop */ 8937 } else { 8938 /* 8939 * Create an mbuf which points to the current 8940 * packet. Always copy from offset zero to 8941 * preserve m_pkthdr. 8942 */ 8943 m = m_copym(m0, 0, M_COPYALL, M_NOWAIT); 8944 if (m == NULL) { 8945 m_freem(m0); 8946 m0 = NULL; 8947 break; 8948 } 8949 m_adj(m, offset); 8950 iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen); 8951 } 8952 break; 8953 } 8954 8955 // case IWX_BAR_FRAME_RELEASE: 8956 // iwx_rx_bar_frame_release(sc, pkt, ml); 8957 // break; 8958 // 8959 case IWX_TX_CMD: 8960 iwx_rx_tx_cmd(sc, pkt, data); 8961 break; 8962 8963 case IWX_BA_NOTIF: 8964 iwx_rx_compressed_ba(sc, pkt); 8965 break; 8966 8967 case IWX_MISSED_BEACONS_NOTIFICATION: 8968 iwx_rx_bmiss(sc, pkt, data); 8969 DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n", 8970 __func__)); 8971 ieee80211_beacon_miss(ic); 8972 break; 8973 8974 case IWX_MFUART_LOAD_NOTIFICATION: 8975 break; 8976 8977 case IWX_ALIVE: { 8978 struct iwx_alive_resp_v4 *resp4; 8979 struct iwx_alive_resp_v5 *resp5; 8980 struct iwx_alive_resp_v6 *resp6; 8981 8982 DPRINTF(("%s: firmware alive\n", __func__)); 8983 sc->sc_uc.uc_ok = 0; 8984 8985 /* 8986 * For v5 and above, we can check the version, for older 8987 * versions we need to check the size. 
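 * (A v4 reply carries no sku_id block, so it can only be told
 * apart by its payload size.)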
8988 */
8989 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
8990 IWX_ALIVE) == 6) {
8991 SYNC_RESP_STRUCT(resp6, pkt);
8992 if (iwx_rx_packet_payload_len(pkt) !=
8993 sizeof(*resp6)) {
8994 sc->sc_uc.uc_intr = 1;
8995 wakeup(&sc->sc_uc);
8996 break;
8997 }
8998 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8999 resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9000 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9001 resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9002 sc->sc_uc.uc_log_event_table = le32toh(
9003 resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9004 sc->sc_uc.uc_umac_error_event_table = le32toh(
9005 resp6->umac_data.dbg_ptrs.error_info_addr);
9006 sc->sc_sku_id[0] =
9007 le32toh(resp6->sku_id.data[0]);
9008 sc->sc_sku_id[1] =
9009 le32toh(resp6->sku_id.data[1]);
9010 sc->sc_sku_id[2] =
9011 le32toh(resp6->sku_id.data[2]);
9012 if (resp6->status == IWX_ALIVE_STATUS_OK) {
9013 sc->sc_uc.uc_ok = 1;
9014 }
9015 } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9016 IWX_ALIVE) == 5) {
9017 SYNC_RESP_STRUCT(resp5, pkt);
9018 if (iwx_rx_packet_payload_len(pkt) !=
9019 sizeof(*resp5)) {
9020 sc->sc_uc.uc_intr = 1;
9021 wakeup(&sc->sc_uc);
9022 break;
9023 }
9024 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9025 resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9026 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9027 resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9028 sc->sc_uc.uc_log_event_table = le32toh(
9029 resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9030 sc->sc_uc.uc_umac_error_event_table = le32toh(
9031 resp5->umac_data.dbg_ptrs.error_info_addr);
9032 sc->sc_sku_id[0] =
9033 le32toh(resp5->sku_id.data[0]);
9034 sc->sc_sku_id[1] =
9035 le32toh(resp5->sku_id.data[1]);
9036 sc->sc_sku_id[2] =
9037 le32toh(resp5->sku_id.data[2]);
9038 if (resp5->status == IWX_ALIVE_STATUS_OK)
9039 sc->sc_uc.uc_ok = 1;
9040 } else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9041 SYNC_RESP_STRUCT(resp4, pkt);
9042 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9043 resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9044 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9045 resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9046 sc->sc_uc.uc_log_event_table = le32toh(
9047 resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9048 sc->sc_uc.uc_umac_error_event_table = le32toh(
9049 resp4->umac_data.dbg_ptrs.error_info_addr);
9050 if (resp4->status == IWX_ALIVE_STATUS_OK)
9051 sc->sc_uc.uc_ok = 1;
9052 } else
9053 printf("%s: unknown payload version\n", DEVNAME(sc));
9054
9055 sc->sc_uc.uc_intr = 1;
9056 wakeup(&sc->sc_uc);
9057 break;
9058 }
9059
9060 case IWX_STATISTICS_NOTIFICATION: {
9061 struct iwx_notif_statistics *stats;
9062 SYNC_RESP_STRUCT(stats, pkt);
9063 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9064 sc->sc_noise = iwx_get_noise(&stats->rx.general);
9065 break;
9066 }
9067
9068 case IWX_DTS_MEASUREMENT_NOTIFICATION:
9069 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9070 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9071 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9072 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9073 break;
9074
9075 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9076 IWX_CT_KILL_NOTIFICATION): {
9077 struct iwx_ct_kill_notif *notif;
9078 SYNC_RESP_STRUCT(notif, pkt);
9079 printf("%s: device at critical temperature (%u degC), "
9080 "stopping device\n",
9081 DEVNAME(sc), le16toh(notif->temperature));
9082 sc->sc_flags |= IWX_FLAG_HW_ERR;
9083 ieee80211_restart_all(ic);
9084 break;
9085 }
9086
9087 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9088 IWX_SCD_QUEUE_CONFIG_CMD):
9089 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 9090 IWX_RX_BAID_ALLOCATION_CONFIG_CMD): 9091 case IWX_WIDE_ID(IWX_MAC_CONF_GROUP, 9092 IWX_SESSION_PROTECTION_CMD): 9093 case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP, 9094 IWX_NVM_GET_INFO): 9095 case IWX_ADD_STA_KEY: 9096 case IWX_PHY_CONFIGURATION_CMD: 9097 case IWX_TX_ANT_CONFIGURATION_CMD: 9098 case IWX_ADD_STA: 9099 case IWX_MAC_CONTEXT_CMD: 9100 case IWX_REPLY_SF_CFG_CMD: 9101 case IWX_POWER_TABLE_CMD: 9102 case IWX_LTR_CONFIG: 9103 case IWX_PHY_CONTEXT_CMD: 9104 case IWX_BINDING_CONTEXT_CMD: 9105 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD): 9106 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC): 9107 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC): 9108 case IWX_REPLY_BEACON_FILTERING_CMD: 9109 case IWX_MAC_PM_POWER_TABLE: 9110 case IWX_TIME_QUOTA_CMD: 9111 case IWX_REMOVE_STA: 9112 case IWX_TXPATH_FLUSH: 9113 case IWX_BT_CONFIG: 9114 case IWX_MCC_UPDATE_CMD: 9115 case IWX_TIME_EVENT_CMD: 9116 case IWX_STATISTICS_CMD: 9117 case IWX_SCD_QUEUE_CFG: { 9118 size_t pkt_len; 9119 9120 if (sc->sc_cmd_resp_pkt[idx] == NULL) 9121 break; 9122 9123 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 9124 BUS_DMASYNC_POSTREAD); 9125 9126 pkt_len = sizeof(pkt->len_n_flags) + 9127 iwx_rx_packet_len(pkt); 9128 9129 if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) || 9130 pkt_len < sizeof(*pkt) || 9131 pkt_len > sc->sc_cmd_resp_len[idx]) { 9132 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF); 9133 sc->sc_cmd_resp_pkt[idx] = NULL; 9134 break; 9135 } 9136 9137 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 9138 BUS_DMASYNC_POSTREAD); 9139 memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len); 9140 break; 9141 } 9142 9143 case IWX_INIT_COMPLETE_NOTIF: 9144 sc->sc_init_complete |= IWX_INIT_COMPLETE; 9145 wakeup(&sc->sc_init_complete); 9146 break; 9147 9148 case IWX_SCAN_COMPLETE_UMAC: { 9149 DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__)); 9150 struct iwx_umac_scan_complete *notif __attribute__((unused)); 9151 SYNC_RESP_STRUCT(notif, pkt); 9152 DPRINTF(("%s: scan complete notif->status=%d\n", __func__, 9153 notif->status)); 9154 ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task); 9155 iwx_endscan(sc); 9156 break; 9157 } 9158 9159 case IWX_SCAN_ITERATION_COMPLETE_UMAC: { 9160 DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n", 9161 __func__)); 9162 struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused)); 9163 SYNC_RESP_STRUCT(notif, pkt); 9164 DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__, 9165 notif->status)); 9166 iwx_endscan(sc); 9167 break; 9168 } 9169 9170 case IWX_MCC_CHUB_UPDATE_CMD: { 9171 struct iwx_mcc_chub_notif *notif; 9172 SYNC_RESP_STRUCT(notif, pkt); 9173 iwx_mcc_update(sc, notif); 9174 break; 9175 } 9176 9177 case IWX_REPLY_ERROR: { 9178 struct iwx_error_resp *resp; 9179 SYNC_RESP_STRUCT(resp, pkt); 9180 printf("%s: firmware error 0x%x, cmd 0x%x\n", 9181 DEVNAME(sc), le32toh(resp->error_type), 9182 resp->cmd_id); 9183 break; 9184 } 9185 9186 case IWX_TIME_EVENT_NOTIFICATION: { 9187 struct iwx_time_event_notif *notif; 9188 uint32_t action; 9189 SYNC_RESP_STRUCT(notif, pkt); 9190 9191 if (sc->sc_time_event_uid != le32toh(notif->unique_id)) 9192 break; 9193 action = le32toh(notif->action); 9194 if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END) 9195 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE; 9196 break; 9197 } 9198 9199 case IWX_WIDE_ID(IWX_MAC_CONF_GROUP, 9200 IWX_SESSION_PROTECTION_NOTIF): { 9201 struct iwx_session_prot_notif *notif; 9202 uint32_t status, start, conf_id; 9203 9204 SYNC_RESP_STRUCT(notif, pkt); 9205 9206 status = 
			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
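		/*
		 * Illustrative example: a direct response to a driver
		 * command arrives with bit 0x80 clear in qid and is
		 * completed via iwx_cmd_done() below, whereas an
		 * unsolicited notification such as IWX_REPLY_RX carries
		 * qid values with bit 0x80 set.
		 */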
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	if (m0 && m0 != data->m)
		m_freem(m0);
}

static void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf m;	/* scratch storage handed to iwx_rx_pkt() */
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * The hardware seems to get upset unless the index we write
	 * back is aligned to a multiple of 8.
	 */
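	/*
	 * Example of the rounding below (illustrative): if the ring has
	 * advanced to closed index 13, we write (13 - 1) & ~7 == 8, so
	 * the value written is always a multiple of 8.
	 */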
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}

#if 0
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something. keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur + 1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;
	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS,
			    IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT,
			    IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
		    IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

out_ena:
	iwx_restore_interrupts(sc);
out:
	return rv;
}
#endif

static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is
		 * hacked out in the fc release; return to it if we ever see
		 * this warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should
	 * be re-enabled by clearing this bit. The register is defined as
	 * write-1-clear (W1C), meaning that the bit is cleared by writing 1
	 * to it.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
out:
	IWX_UNLOCK(sc);
	return;
}

/*
 * The device info table below contains device-specific config overrides.
 * The most important parameter derived from this table is the name of the
 * firmware image to load.
 *
 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
 * The "old" table matches devices based on PCI vendor/product IDs only.
 * The "new" table extends this with various device parameters derived
 * from the MAC type and RF type.
 *
 * In iwlwifi "old" and "new" tables share the same array, where "old"
 * entries contain dummy values for data defined only for "new" entries.
 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled in
 * some work-in-progress state for quite a while. Linux commits moving
 * entries from "old" to "new" have at times been reverted due to
 * regressions. Part of this complexity comes from iwlwifi supporting both
 * iwm(4) and iwx(4) devices in the same driver.
 *
 * Our table below contains mostly "new" entries declared in iwlwifi
 * with the _IWL_DEV_INFO() macro (with a leading underscore).
 * Other devices are matched based on PCI vendor/product ID as usual,
 * unless matching specific PCI subsystem vendor/product IDs is required.
 *
 * Some "old"-style entries are required to identify the firmware image to
 * use. Others might be used to print a specific marketing name into Linux
 * dmesg, but we can't be sure whether the corresponding devices would be
 * matched correctly in the absence of their entries. So we include them
 * just in case.
 */

struct iwx_dev_info {
	uint16_t device;
	uint16_t subdevice;
	uint16_t mac_type;
	uint16_t rf_type;
	uint8_t mac_step;
	uint8_t rf_id;
	uint8_t no_160;
	uint8_t cores;
	uint8_t cdb;
	uint8_t jacket;
	const struct iwx_device_cfg *cfg;
};

#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
	    _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
	    IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
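
/*
 * For illustration: a plain PCI ID match such as
 *
 *	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0)
 *
 * expands to an _IWX_DEV_INFO() entry whose remaining fields are all
 * IWX_CFG_ANY, i.e. it matches any MAC type/step, RF type/ID, and core
 * configuration for that vendor/product pair.
 */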

/*
 * When adding entries to this table keep in mind that entries must
 * be listed in the same order as in the Linux driver. Code walks this
 * table backwards and uses the first matching entry it finds.
 * Device firmware must be available in fw_update(8).
 */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* ax101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* ax201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};

static int
iwx_preinit(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (sc->attached) {
		return 0;
	}

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	err = iwx_run_init_mvm_ucode(sc, 1);
	iwx_stop_device(sc);
	if (err) {
		printf("%s: could not run init firmware\n", DEVNAME(sc));
		return err;
	}

	/* Print version info and MAC address on first successful fw load. */
	sc->attached = 1;
	if (sc->sc_pnvm_ver) {
		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, address %s\n",
		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
		    sc->sc_fwver, sc->sc_pnvm_ver,
		    ether_sprintf(sc->sc_nvm.hw_addr));
	} else {
		printf("%s: hw rev 0x%x, fw %s, address %s\n",
		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
	}

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	return 0;
}

static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}

const struct iwx_device_cfg *
iwx_find_device_cfg(struct iwx_softc *sc)
{
	uint16_t sdev_id, mac_type, rf_type;
	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
	int i;

	sdev_id = pci_get_subdevice(sc->sc_dev);
	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);

	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
	cores = IWX_SUBDEVICE_CORES(sdev_id);

	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];

		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
		    dev_info->device != sc->sc_pid)
			continue;

		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
		    dev_info->subdevice != sdev_id)
			continue;

		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
		    dev_info->mac_type != mac_type)
			continue;

		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
		    dev_info->mac_step != mac_step)
			continue;

		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
		    dev_info->rf_type != rf_type)
			continue;

		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
		    dev_info->cdb != cdb)
			continue;

		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
		    dev_info->jacket != jacket)
			continue;

		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
		    dev_info->rf_id != rf_id)
			continue;

		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
		    dev_info->no_160 != no_160)
			continue;

		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
		    dev_info->cores != cores)
			continue;

		return dev_info->cfg;
	}

	return NULL;
}
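
/*
 * Illustrative note on the backward walk above: because the table is
 * scanned from the end, a generic entry near the top such as
 * IWX_DEV_INFO(0x2725, 0x0090, ...) only applies when none of the later,
 * more specific _IWX_DEV_INFO() entries match the detected MAC/RF
 * configuration.
 */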

static int
iwx_probe(device_t dev)
{
	int i;

	for (i = 0; i < nitems(iwx_devices); i++) {
		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
		    pci_get_device(dev) == iwx_devices[i].device) {
			device_set_desc(dev, iwx_devices[i].name);

			/*
			 * Due to significant existing deployments using
			 * iwlwifi, lower the priority of iwx.
			 *
			 * This inverts the advice in bus.h where drivers
			 * supporting newer hardware should return
			 * BUS_PROBE_DEFAULT and drivers for older devices
			 * should return BUS_PROBE_LOW_PRIORITY.
			 */
			return (BUS_PROBE_LOW_PRIORITY);
		}
	}

	return (ENXIO);
}

static int
iwx_attach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwx_device_cfg *cfg;
	int err;
	int txq_i, i, j;
	size_t ctxt_info_size;
	int rid;
	int count;
	int error;

	sc->sc_dev = dev;
	sc->sc_pid = pci_get_device(dev);
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
	IWX_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
	sc->sc_tq = taskqueue_create("iwx_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwx_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		return (ENXIO);
	}

	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (sc->sc_cap_off == 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (ENXIO);
	}

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	if (pci_msix_count(dev)) {
		sc->sc_msix = 1;
	} else {
		device_printf(dev, "no MSI-X found\n");
		return (ENXIO);
	}

	pci_enable_busmaster(dev);
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	count = 1;
	rid = 0;
	if (pci_alloc_msix(dev, &count) == 0)
		rid = 1;
	DPRINTF(("%s: count=%d\n", __func__, count));
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
	}

	/* Clear pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);

	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
	DPRINTF(("%s: sc->sc_hw_rf_id=%d\n", __func__, sc->sc_hw_rf_id));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and now the revision step also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we store it
	 * in the old format.
	 */
10200 */ 10201 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) | 10202 (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2); 10203 10204 switch (sc->sc_pid) { 10205 case PCI_PRODUCT_INTEL_WL_22500_1: 10206 sc->sc_fwname = IWX_CC_A_FW; 10207 sc->sc_device_family = IWX_DEVICE_FAMILY_22000; 10208 sc->sc_integrated = 0; 10209 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE; 10210 sc->sc_low_latency_xtal = 0; 10211 sc->sc_xtal_latency = 0; 10212 sc->sc_tx_with_siso_diversity = 0; 10213 sc->sc_uhb_supported = 0; 10214 break; 10215 case PCI_PRODUCT_INTEL_WL_22500_2: 10216 case PCI_PRODUCT_INTEL_WL_22500_5: 10217 /* These devices should be QuZ only. */ 10218 if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) { 10219 device_printf(dev, "unsupported AX201 adapter\n"); 10220 return (ENXIO); 10221 } 10222 sc->sc_fwname = IWX_QUZ_A_HR_B_FW; 10223 sc->sc_device_family = IWX_DEVICE_FAMILY_22000; 10224 sc->sc_integrated = 1; 10225 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200; 10226 sc->sc_low_latency_xtal = 0; 10227 sc->sc_xtal_latency = 500; 10228 sc->sc_tx_with_siso_diversity = 0; 10229 sc->sc_uhb_supported = 0; 10230 break; 10231 case PCI_PRODUCT_INTEL_WL_22500_3: 10232 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0) 10233 sc->sc_fwname = IWX_QU_C_HR_B_FW; 10234 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ) 10235 sc->sc_fwname = IWX_QUZ_A_HR_B_FW; 10236 else 10237 sc->sc_fwname = IWX_QU_B_HR_B_FW; 10238 sc->sc_device_family = IWX_DEVICE_FAMILY_22000; 10239 sc->sc_integrated = 1; 10240 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200; 10241 sc->sc_low_latency_xtal = 0; 10242 sc->sc_xtal_latency = 500; 10243 sc->sc_tx_with_siso_diversity = 0; 10244 sc->sc_uhb_supported = 0; 10245 break; 10246 case PCI_PRODUCT_INTEL_WL_22500_4: 10247 case PCI_PRODUCT_INTEL_WL_22500_7: 10248 case PCI_PRODUCT_INTEL_WL_22500_8: 10249 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0) 10250 sc->sc_fwname = IWX_QU_C_HR_B_FW; 10251 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ) 10252 sc->sc_fwname = IWX_QUZ_A_HR_B_FW; 10253 else 10254 sc->sc_fwname = IWX_QU_B_HR_B_FW; 10255 sc->sc_device_family = IWX_DEVICE_FAMILY_22000; 10256 sc->sc_integrated = 1; 10257 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820; 10258 sc->sc_low_latency_xtal = 0; 10259 sc->sc_xtal_latency = 1820; 10260 sc->sc_tx_with_siso_diversity = 0; 10261 sc->sc_uhb_supported = 0; 10262 break; 10263 case PCI_PRODUCT_INTEL_WL_22500_6: 10264 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0) 10265 sc->sc_fwname = IWX_QU_C_HR_B_FW; 10266 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ) 10267 sc->sc_fwname = IWX_QUZ_A_HR_B_FW; 10268 else 10269 sc->sc_fwname = IWX_QU_B_HR_B_FW; 10270 sc->sc_device_family = IWX_DEVICE_FAMILY_22000; 10271 sc->sc_integrated = 1; 10272 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500; 10273 sc->sc_low_latency_xtal = 1; 10274 sc->sc_xtal_latency = 12000; 10275 sc->sc_tx_with_siso_diversity = 0; 10276 sc->sc_uhb_supported = 0; 10277 break; 10278 case PCI_PRODUCT_INTEL_WL_22500_9: 10279 case PCI_PRODUCT_INTEL_WL_22500_10: 10280 case PCI_PRODUCT_INTEL_WL_22500_11: 10281 case PCI_PRODUCT_INTEL_WL_22500_13: 10282 /* _14 is an MA device, not yet supported */ 10283 case PCI_PRODUCT_INTEL_WL_22500_15: 10284 case PCI_PRODUCT_INTEL_WL_22500_16: 10285 sc->sc_fwname = IWX_SO_A_GF_A_FW; 10286 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM; 10287 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210; 10288 sc->sc_integrated = 0; 10289 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE; 10290 sc->sc_low_latency_xtal = 0; 10291 sc->sc_xtal_latency = 0; 
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_12:
	case PCI_PRODUCT_INTEL_WL_22500_17:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		sc->sc_imr_enabled = 1;
		break;
	default:
		device_printf(dev, "unknown adapter type\n");
		return (ENXIO);
	}

	cfg = iwx_find_device_cfg(sc);
	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
	if (cfg) {
		sc->sc_fwname = cfg->fw_name;
		sc->sc_pnvm_name = cfg->pnvm_name;
		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
		sc->sc_uhb_supported = cfg->uhb_supported;
		if (cfg->xtal_latency) {
			sc->sc_xtal_latency = cfg->xtal_latency;
			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
		}
	}
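
	/*
	 * Note: a device info table match overrides the PCI-ID based
	 * defaults chosen in the switch above; the crystal settings are
	 * only overridden when the table entry supplies a nonzero
	 * xtal_latency.
	 */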
10363 */ 10364 KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2), 10365 ("iwx_prph_info has wrong size")); 10366 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma, 10367 PAGE_SIZE, 1); 10368 if (err) { 10369 device_printf(dev, 10370 "could not allocate prph info memory\n"); 10371 goto fail1; 10372 } 10373 } 10374 10375 /* Allocate interrupt cause table (ICT).*/ 10376 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, 10377 IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT); 10378 if (err) { 10379 device_printf(dev, "could not allocate ICT table\n"); 10380 goto fail1; 10381 } 10382 10383 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) { 10384 err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i); 10385 if (err) { 10386 device_printf(dev, "could not allocate TX ring %d\n", 10387 txq_i); 10388 goto fail4; 10389 } 10390 } 10391 10392 err = iwx_alloc_rx_ring(sc, &sc->rxq); 10393 if (err) { 10394 device_printf(sc->sc_dev, "could not allocate RX ring\n"); 10395 goto fail4; 10396 } 10397 10398 #ifdef IWX_DEBUG 10399 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10400 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", 10401 CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging"); 10402 10403 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10404 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark", 10405 CTLFLAG_RW, &iwx_himark, 0, "queues high watermark"); 10406 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10407 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark", 10408 CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark"); 10409 10410 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10411 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk", 10412 CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask"); 10413 10414 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10415 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0", 10416 CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0"); 10417 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10418 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1", 10419 CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1"); 10420 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10421 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2", 10422 CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2"); 10423 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10424 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3", 10425 CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3"); 10426 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10427 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4", 10428 CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4"); 10429 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10430 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5", 10431 CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5"); 10432 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10433 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6", 10434 CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6"); 10435 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 10436 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7", 10437 CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7"); 10438 #endif 10439 ic->ic_softc = sc; 10440 ic->ic_name = device_get_nameunit(sc->sc_dev); 10441 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 10442 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 10443 10444 /* Set device capabilities. 
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_MONITOR |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;

	ic->ic_txstream = 2;
	ic->ic_rxstream = 2;
	ic->ic_htcaps |= IEEE80211_HTC_HT
	    | IEEE80211_HTCAP_SMPS_OFF
	    | IEEE80211_HTCAP_SHORTGI20		/* short GI in 20MHz */
	    | IEEE80211_HTCAP_SHORTGI40		/* short GI in 40MHz */
	    | IEEE80211_HTCAP_CHWIDTH40		/* 40MHz channel width */
	    | IEEE80211_HTC_AMPDU		/* tx A-MPDU */
//	    | IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
	    | IEEE80211_HTCAP_MAXAMSDU_3839;	/* max A-MSDU length */

	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;

	/*
	 * XXX: setupcurchan() expects vhtcaps to be non-zero
	 * https://bugs.freebsd.org/274156
	 */
	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
	    | IEEE80211_VHTCAP_SHORT_GI_80
	    | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
	    | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
	    | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;

	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
	    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
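
	/*
	 * With the standard VHT two-bits-per-stream MCS map encoding the
	 * value above works out to 0xfffa: streams 1-2 advertise MCS 0-9
	 * and streams 3-8 are marked not supported, matching the
	 * ic_txstream/ic_rxstream settings above.
	 */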

	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		for (j = 0; j < nitems(rxba->entries); j++)
			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
	}

	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail4;
	}

	return (0);

fail4:
	while (--txq_i >= 0)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);
	if (sc->ict_dma.vaddr != NULL)
		iwx_dma_contig_free(&sc->ict_dma);

fail1:
	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_dma_contig_free(&sc->prph_scratch_dma);
	iwx_dma_contig_free(&sc->prph_info_dma);
	return (ENXIO);
}

static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	iwx_stop_device(sc);

	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}

static void
iwx_radiotap_attach(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
	    "->%s begin\n", __func__);

	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWX_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWX_RX_RADIOTAP_PRESENT);

	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
	    "->%s end\n", __func__);
}

struct ieee80211vap *
iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwx_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))	/* only one at a time */
		return NULL;
	ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;	/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwx_newstate;

	ivp->id = IWX_DEFAULT_MACID;
	ivp->color = IWX_DEFAULT_COLOR;

	ivp->have_wme = TRUE;
	ivp->ps_disabled = FALSE;

	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* h/w crypto support */
	vap->iv_key_alloc = iwx_key_alloc;
	vap->iv_key_delete = iwx_key_delete;
	vap->iv_key_set = iwx_key_set;
	vap->iv_key_update_begin = iwx_key_update_begin;
	vap->iv_key_update_end = iwx_key_update_end;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);
	ic->ic_opmode = opmode;

	return vap;
}

static void
iwx_vap_delete(struct ieee80211vap *vap)
{
	struct iwx_vap *ivp = IWX_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

static void
iwx_parent(struct ieee80211com *ic)
{
	struct iwx_softc *sc = ic->ic_softc;

	IWX_LOCK(sc);
	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
		iwx_stop(sc);
		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
	} else {
		iwx_init(sc);
		ieee80211_start_all(ic);
	}
	IWX_UNLOCK(sc);
}

static int
iwx_suspend(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;

	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
		ieee80211_suspend_all(ic);

		iwx_stop(sc);
		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
	}
	return (0);
}

static int
iwx_resume(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	IWX_LOCK(sc);

	err = iwx_init(sc);
	if (err) {
		iwx_stop_device(sc);
		IWX_UNLOCK(sc);
		return err;
	}

	IWX_UNLOCK(sc);

	ieee80211_resume_all(ic);
	return (0);
}

static void
iwx_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_softc *sc = ic->ic_softc;
	int err;

	IWX_LOCK(sc);
	if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
		err = iwx_scan(sc);
	else
		err = iwx_bgscan(ic);
	IWX_UNLOCK(sc);
	if (err)
		ieee80211_cancel_scan(vap);

	return;
}

static void
iwx_update_mcast(struct ieee80211com *ic)
{
}

static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}

static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}

static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI, "%s:%d NOT IMPLEMENTED\n",
	    __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}

static void
iwx_endscan_cb(void *arg, int pending)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(("scan ended\n"));
	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}

static int
iwx_wme_update(struct ieee80211com *ic)
{
	return 0;
}

static int
iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwx_softc *sc = ic->ic_softc;
	int err;

	IWX_LOCK(sc);
	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_tx(sc, m, ni);
		IWX_UNLOCK(sc);
		return err;
	} else {
		IWX_UNLOCK(sc);
		return EIO;
	}
}

static int
iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct iwx_softc *sc = ic->ic_softc;
	int error;

	// TODO: mbufq_enqueue in iwm
	// TODO dequeue in iwm_start, counters, locking
	IWX_LOCK(sc);
	error = mbufq_enqueue(&sc->sc_snd, m);
	if (error) {
		IWX_UNLOCK(sc);
		return (error);
	}

	iwx_start(sc);
	IWX_UNLOCK(sc);
	return (0);
}

static int
iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
    int baparamset, int batimeout, int baseqctl)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwx_softc *sc = ic->ic_softc;
	int tid;

	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
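
	/*
	 * Illustrative decode, assuming the standard Block Ack Parameter
	 * Set layout: baparamset 0x1008 carries TID 2 in bits 2-5 and a
	 * buffer size of 64 in bits 6-15.
	 */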
static int
iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{

	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM)
		return (1);

	if (!(&vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
		/*
		 * Not in the global key table, the driver should handle this
		 * by allocating a slot in the h/w key table/cache.  In
		 * lieu of that return key slot 0 for any unicast key
		 * request.  We disallow the request if this is a group key.
		 * This default policy does the right thing for legacy hardware
		 * with a 4 key table.  It also handles devices that pass
		 * packets through untouched when marked with the WEP bit
		 * and key index 0.
		 */
		if (k->wk_flags & IEEE80211_KEY_GROUP)
			return (0);
		*keyix = 0;	/* NB: use key index 0 for ucast key */
	} else {
		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
	}
	*rxkeyix = IEEE80211_KEYIX_NONE;	/* XXX maybe *keyix? */
	return (1);
}

static int
iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	int err;
	int id;

	if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM)
		return (1);

	IWX_LOCK(sc);
	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss, so there is no need to validate arguments beyond this.
	 */

	memset(&cmd, 0, sizeof(cmd));

	if (k->wk_flags & IEEE80211_KEY_GROUP)
		DPRINTF(("%s: adding group key\n", __func__));
	else
		DPRINTF(("%s: adding key\n", __func__));

	if (k >= &vap->iv_nw_keys[0] &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
		id = (k - vap->iv_nw_keys);
	else
		id = 0;
	DPRINTF(("%s: setting keyid=%i\n", __func__, id));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	if (k->wk_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else {
		cmd.common.key_offset = 0;
	}
	memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
	    k->wk_keylen));
	DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
	for (int i = 0; i < k->wk_keylen; i++)
		DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
	cmd.common.sta_id = IWX_STATION_ID;

	cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
	DPRINTF(("%s: k->wk_keytsc=%ju\n", __func__,
	    (uintmax_t)k->wk_keytsc));

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		printf("%s: can't set WPA2 keys (error %d)\n", __func__, err);
		IWX_UNLOCK(sc);
		return (err);
	}
	DPRINTF(("%s: key added successfully\n", __func__));
	IWX_UNLOCK(sc);
	return (1);
}

static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return (1);
}
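/*
 * newbus glue.  The PNP info string below lets devmatch(8) auto-load
 * the module by matching the PCI device ID against iwx_devices, with
 * the vendor fixed at 0x8086 (Intel).
 */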
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwx_probe),
	DEVMETHOD(device_attach,	iwx_attach),
	DEVMETHOD(device_detach,	iwx_detach),
	DEVMETHOD(device_suspend,	iwx_suspend),
	DEVMETHOD(device_resume,	iwx_resume),

	DEVMETHOD_END
};

static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof(struct iwx_softc)
};

DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
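/*
 * Usage sketch (assuming the module is built as if_iwx.ko): besides
 * devmatch(8) auto-loading, the driver can be loaded manually with
 * "kldload if_iwx", or at boot by adding if_iwx_load="YES" to
 * /boot/loader.conf.
 */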