/*-
 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
 */

/*	$OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $	*/

/*
 *
 * Copyright (c) 2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*-
 * Copyright (c) 2024 Future Crew, LLC
 * Author: Mikhail Pchelin <misha@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 * Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/epoch.h>
#include <sys/kdb.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_vht.h>

int iwx_himark = 224;
int iwx_lomark = 192;

#define IWX_FBSD_RSP_V3	3
#define IWX_FBSD_RSP_V4	4

#define DEVNAME(_sc)	(device_get_nameunit((_sc)->sc_dev))
#define IC2IFP(ic)	(((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#include <dev/iwx/if_iwxreg.h>
#include <dev/iwx/if_iwxvar.h>

#include <dev/iwx/if_iwx_debug.h>

#define PCI_CFG_RETRY_TIMEOUT	0x41

#define PCI_VENDOR_INTEL		0x8086
#define PCI_PRODUCT_INTEL_WL_22500_1	0x2723	/* Wi-Fi 6 AX200 */
#define PCI_PRODUCT_INTEL_WL_22500_2	0x02f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_3	0xa0f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_4	0x34f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_5	0x06f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_6	0x43f0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_7	0x3df0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_8	0x4df0	/* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_9	0x2725	/* Wi-Fi 6 AX210 */
#define PCI_PRODUCT_INTEL_WL_22500_10	0x2726	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_11	0x51f0	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_12	0x7a70	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_13	0x7af0	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_14	0x7e40	/* Wi-Fi 6 AX210 */
#define PCI_PRODUCT_INTEL_WL_22500_15	0x7f70	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_16	0x54f0	/* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_17	0x51f1	/* Wi-Fi 6 AX211 */

static const struct iwx_devices {
	uint16_t device;
	char *name;
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1,		"Wi-Fi 6 AX200" },
	{ PCI_PRODUCT_INTEL_WL_22500_2,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_3,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_4,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_5,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_6,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_7,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_8,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_9,		"Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_10,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_11,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_12,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_13,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_14,	"Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_15,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_16,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_17,	"Wi-Fi 6 AX211" },
};

static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37

const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
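
/*
 * Note: the "rate" column in iwx_rates[] above is in units of 500 kbit/s,
 * so e.g. { 12, ... } is the 6 Mbit/s OFDM rate.  The other two columns
 * give the legacy PLCP signal value and the HT PLCP value (SISO MCS 0-7,
 * MIMO2 MCS 8-15); the INV/INVM markers flag rates that have no
 * equivalent in the other modulation scheme.
 */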

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
#if 0
static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
#endif
static int iwx_apply_debug_destination(struct iwx_softc *);
static void iwx_set_ltr(struct iwx_softc *);
static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
    const struct iwx_fw_sects *);
static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
static void iwx_ctxt_info_free_paging(struct iwx_softc *);
static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
    struct iwx_context_info_dram *);
static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
    const uint8_t *, size_t);
static int iwx_set_default_calib(struct iwx_softc *, const void *);
static void iwx_fw_info_free(struct iwx_fw_info *);
static int iwx_read_firmware(struct iwx_softc *);
static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
static int iwx_nic_lock(struct iwx_softc *);
static void iwx_nic_assert_locked(struct iwx_softc *);
static void iwx_nic_unlock(struct iwx_softc *);
static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
    uint32_t);
static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
    bus_size_t, bus_size_t);
static void iwx_dma_contig_free(struct iwx_dma_info *);
static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
static void iwx_disable_rx_dma(struct iwx_softc *);
static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
static void iwx_enable_rfkill_int(struct iwx_softc *);
static int iwx_check_rfkill(struct iwx_softc *);
static void iwx_enable_interrupts(struct iwx_softc *);
static void iwx_enable_fwload_interrupt(struct iwx_softc *);
#if 0
static void iwx_restore_interrupts(struct iwx_softc *);
#endif
static void iwx_disable_interrupts(struct iwx_softc *);
static void iwx_ict_reset(struct iwx_softc *);
static int iwx_set_hw_ready(struct iwx_softc *);
static int iwx_prepare_card_hw(struct iwx_softc *);
static int iwx_force_power_gating(struct iwx_softc *);
static void iwx_apm_config(struct iwx_softc *);
static int iwx_apm_init(struct iwx_softc *);
static void iwx_apm_stop(struct iwx_softc *);
static int iwx_allow_mcast(struct iwx_softc *);
static void iwx_init_msix_hw(struct iwx_softc *);
static void iwx_conf_msix_hw(struct iwx_softc *, int);
static int iwx_clear_persistence_bit(struct iwx_softc *);
static int iwx_start_hw(struct iwx_softc *);
static void iwx_stop_device(struct iwx_softc *);
static void iwx_nic_config(struct iwx_softc *);
static int iwx_nic_rx_init(struct iwx_softc *);
static int iwx_nic_init(struct iwx_softc *);
static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
static void iwx_post_alive(struct iwx_softc *);
static int iwx_schedule_session_protection(struct iwx_softc *,
    struct iwx_node *, uint32_t);
static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
static void iwx_init_channel_map(struct ieee80211com *, int, int *,
    struct ieee80211_channel[]);
static int iwx_mimo_enabled(struct iwx_softc *);
static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
    uint16_t);
static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
    uint16_t, uint16_t, int, int);
static void iwx_sta_tx_agg_start(struct iwx_softc *,
    struct ieee80211_node *, uint8_t);
static void iwx_ba_rx_task(void *, int);
static void iwx_ba_tx_task(void *, int);
static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
static int iwx_is_valid_mac_addr(const uint8_t *);
static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
static int iwx_nvm_get(struct iwx_softc *);
static int iwx_load_firmware(struct iwx_softc *);
static int iwx_start_fw(struct iwx_softc *);
static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
static int iwx_load_pnvm(struct iwx_softc *);
static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
static int iwx_load_ucode_wait_alive(struct iwx_softc *);
static int iwx_send_dqa_cmd(struct iwx_softc *);
static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
static int iwx_config_ltr(struct iwx_softc *);
static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int,
    bus_dma_segment_t *);
static int iwx_rx_addbuf(struct iwx_softc *, int, int);
static int iwx_rxmq_get_signal_strength(struct iwx_softc *,
    struct iwx_rx_mpdu_desc *);
static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
    struct iwx_rx_data *);
static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
#if 0
int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
    struct ieee80211_node *, struct ieee80211_rxinfo *);
#endif
static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
    int, int, uint32_t, uint8_t);
static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
    struct iwx_tx_data *);
static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
    struct iwx_rx_data *);
static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
    struct iwx_rx_data *);
static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *,
    struct ieee80211_channel *);
static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
    struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
#if 0
static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
#endif
static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
    uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
    const void *);
static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
    uint32_t *);
static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
    const void *, uint32_t *);
static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
static void iwx_cmd_done(struct iwx_softc *, int, int, int);
static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
static uint32_t iwx_fw_rateidx_cck(uint8_t);
static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
    struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
    struct mbuf *);
static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *,
    int, uint16_t, uint16_t);
static int iwx_tx(struct iwx_softc *, struct mbuf *,
    struct ieee80211_node *);
static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
    struct iwx_beacon_filter_cmd *);
static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
    int);
static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
    struct iwx_mac_power_cmd *);
static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
static int iwx_power_update_device(struct iwx_softc *);
#if 0
static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
#endif
static int iwx_disable_beacon_filter(struct iwx_softc *);
static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
static int iwx_fill_probe_req(struct iwx_softc *,
    struct iwx_scan_probe_req *);
static int iwx_config_umac_scan_reduced(struct iwx_softc *);
static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
    struct iwx_scan_general_params_v10 *, int);
static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
    struct iwx_scan_general_params_v10 *, uint16_t, int);
static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
    struct iwx_scan_channel_params_v6 *, uint32_t, int);
static int iwx_umac_scan_v14(struct iwx_softc *, int);
static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
static int iwx_rval2ridx(int);
static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
    int *);
static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
    struct iwx_mac_ctx_cmd *, uint32_t);
static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
    struct iwx_mac_data_sta *, int);
static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
    uint32_t, int);
static int iwx_clear_statistics(struct iwx_softc *);
static int iwx_scan(struct iwx_softc *);
static int iwx_bgscan(struct ieee80211com *);
static int iwx_enable_mgmt_queue(struct iwx_softc *);
static int iwx_disable_mgmt_queue(struct iwx_softc *);
static int iwx_rs_rval2idx(uint8_t);
static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
    int);
static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *,
    int);
static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
    uint8_t, uint8_t);
static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
    uint8_t);
static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
static int iwx_deauth(struct iwx_softc *);
static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
static int iwx_run_stop(struct iwx_softc *);
static struct ieee80211_node *iwx_node_alloc(struct ieee80211vap *,
    const uint8_t[IEEE80211_ADDR_LEN]);
#if 0
int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
    struct ieee80211_key *);
void iwx_setkey_task(void *);
void iwx_delete_key(struct ieee80211com *,
    struct ieee80211_node *, struct ieee80211_key *);
#endif
static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwx_endscan(struct iwx_softc *);
static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
    struct ieee80211_node *);
static int iwx_sf_config(struct iwx_softc *, int);
static int iwx_send_bt_init_conf(struct iwx_softc *);
static int iwx_send_soc_conf(struct iwx_softc *);
static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
static int iwx_init_hw(struct iwx_softc *);
static int iwx_init(struct iwx_softc *);
static void iwx_stop(struct iwx_softc *);
static void iwx_watchdog(void *);
static const char *iwx_desc_lookup(uint32_t);
static void iwx_nic_error(struct iwx_softc *);
static void iwx_dump_driver_status(struct iwx_softc *);
static void iwx_nic_umac_error(struct iwx_softc *);
static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
    struct mbuf *);
static void iwx_notif_intr(struct iwx_softc *);
#if 0
/* XXX-THJ - I don't have hardware for this */
static int iwx_intr(void *);
#endif
static void iwx_intr_msix(void *);
static int iwx_preinit(struct iwx_softc *);
static void iwx_attach_hook(void *);
static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
static int iwx_probe(device_t);
static int iwx_attach(device_t);
static int iwx_detach(device_t);

/* FreeBSD specific glue */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

#if IWX_DEBUG
#define DPRINTF(x)	do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
#else
#define DPRINTF(x)	do { ; } while (0)
#endif

/* FreeBSD specific functions */
static struct ieee80211vap *iwx_vap_create(struct ieee80211com *,
    const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
    const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
static void iwx_vap_delete(struct ieee80211vap *);
static void iwx_parent(struct ieee80211com *);
static void iwx_scan_start(struct ieee80211com *);
static void iwx_scan_end(struct ieee80211com *);
static void iwx_update_mcast(struct ieee80211com *ic);
static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwx_scan_mindwell(struct ieee80211_scan_state *);
static void iwx_set_channel(struct ieee80211com *);
static void iwx_endscan_cb(void *, int);
static int iwx_wme_update(struct ieee80211com *);
static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
    const struct ieee80211_bpf_params *);
static int iwx_transmit(struct ieee80211com *, struct mbuf *);
static void iwx_start(struct iwx_softc *);
static int iwx_ampdu_rx_start(struct ieee80211_node *,
    struct ieee80211_rx_ampdu *, int, int, int);
static void iwx_ampdu_rx_stop(struct ieee80211_node *,
    struct ieee80211_rx_ampdu *);
static int iwx_addba_request(struct ieee80211_node *,
    struct ieee80211_tx_ampdu *, int, int, int);
static int iwx_addba_response(struct ieee80211_node *,
    struct ieee80211_tx_ampdu *, int, int, int);
static void iwx_key_update_begin(struct ieee80211vap *);
static void iwx_key_update_end(struct ieee80211vap *);
static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
    ieee80211_keyix *, ieee80211_keyix *);
static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
static int iwx_key_delete(struct ieee80211vap *,
    const struct ieee80211_key *);
static int iwx_suspend(device_t);
static int iwx_resume(device_t);
static void iwx_radiotap_attach(struct iwx_softc *);

/* OpenBSD compat defines */
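/*
 * These definitions mirror OpenBSD net80211 interfaces that have no
 * direct FreeBSD equivalent; they allow the OpenBSD-derived code below
 * to stay close to its original form.
 */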
#define IEEE80211_HTOP0_SCO_SCN		0
#define IEEE80211_VHTOP0_CHAN_WIDTH_HT	0
#define IEEE80211_VHTOP0_CHAN_WIDTH_80	1

#define IEEE80211_HT_RATESET_SISO	0
#define IEEE80211_HT_RATESET_MIMO2	2

const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };

inline int
ieee80211_has_addr4(const struct ieee80211_frame *wh)
{
	return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
	    IEEE80211_FC1_DIR_DSTODS;
}

static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

static int
iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwx_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

static void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF);
	dram->paging_cnt = 0;
	dram->paging = NULL;
}
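
/*
 * Firmware images are a flat list of sections: the LMAC sections come
 * first, followed by a IWX_CPU1_CPU2_SEPARATOR_SECTION marker, then the
 * UMAC sections, a IWX_PAGING_SEPARATOR_SECTION marker, and finally the
 * paging sections.  iwx_get_num_sections() below counts the sections in
 * one group by scanning up to the next separator, and iwx_init_fw_sec()
 * relies on this layout when indexing fw_sect[].
 */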
{ 771 printf("%s: could not allocate memory for firmware sections\n", 772 DEVNAME(sc)); 773 IWX_LOCK(sc); 774 return ENOMEM; 775 } 776 777 dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging), 778 M_DEVBUF, M_ZERO | M_WAITOK); 779 IWX_LOCK(sc); 780 if (!dram->paging) { 781 printf("%s: could not allocate memory for firmware paging\n", 782 DEVNAME(sc)); 783 return ENOMEM; 784 } 785 786 /* initialize lmac sections */ 787 for (i = 0; i < dram->lmac_cnt; i++) { 788 ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i], 789 &dram->fw[fw_cnt]); 790 if (ret) 791 return ret; 792 ctxt_dram->lmac_img[i] = 793 htole64(dram->fw[fw_cnt].paddr); 794 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 795 "%s: firmware LMAC section %d at 0x%llx size %lld\n", 796 __func__, i, 797 (unsigned long long)dram->fw[fw_cnt].paddr, 798 (unsigned long long)dram->fw[fw_cnt].size); 799 fw_cnt++; 800 } 801 802 /* initialize umac sections */ 803 for (i = 0; i < dram->umac_cnt; i++) { 804 /* access FW with +1 to make up for lmac separator */ 805 ret = iwx_ctxt_info_alloc_dma(sc, 806 &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]); 807 if (ret) 808 return ret; 809 ctxt_dram->umac_img[i] = 810 htole64(dram->fw[fw_cnt].paddr); 811 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 812 "%s: firmware UMAC section %d at 0x%llx size %lld\n", 813 __func__, i, 814 (unsigned long long)dram->fw[fw_cnt].paddr, 815 (unsigned long long)dram->fw[fw_cnt].size); 816 fw_cnt++; 817 } 818 819 /* 820 * Initialize paging. 821 * Paging memory isn't stored in dram->fw as the umac and lmac - it is 822 * stored separately. 823 * This is since the timing of its release is different - 824 * while fw memory can be released on alive, the paging memory can be 825 * freed only when the device goes down. 826 * Given that, the logic here in accessing the fw image is a bit 827 * different - fw_cnt isn't changing so loop counter is added to it. 828 */ 829 for (i = 0; i < dram->paging_cnt; i++) { 830 /* access FW with +2 to make up for lmac & umac separators */ 831 int fw_idx = fw_cnt + i + 2; 832 833 ret = iwx_ctxt_info_alloc_dma(sc, 834 &fws->fw_sect[fw_idx], &dram->paging[i]); 835 if (ret) 836 return ret; 837 838 ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr); 839 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 840 "%s: firmware paging section %d at 0x%llx size %lld\n", 841 __func__, i, 842 (unsigned long long)dram->paging[i].paddr, 843 (unsigned long long)dram->paging[i].size); 844 } 845 846 return 0; 847 } 848 849 static void 850 iwx_fw_version_str(char *buf, size_t bufsize, 851 uint32_t major, uint32_t minor, uint32_t api) 852 { 853 /* 854 * Starting with major version 35 the Linux driver prints the minor 855 * version in hexadecimal. 
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

#if 0
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif
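
/*
 * The firmware debug destination TLV carries a list of register
 * operations (CSR/PRPH assigns and bit set/clear) that configure where
 * the firmware writes its debug monitor data.  The routine below would
 * apply those operations, but its body is currently compiled out.
 */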
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}

static void
iwx_set_ltr(struct iwx_softc *sc)
{
	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

	/*
	 * To workaround hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if (!sc->sc_integrated) {
		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (sc->sc_integrated &&
	    sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
		    IWX_HPM_MAC_LRT_ENABLE_ALL);
		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
	}
}
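
/*
 * Build the context info structure in DMA memory for pre-AX210 devices.
 * It tells the firmware's self-load logic where the RX/TX rings and the
 * ucode sections live; the device fetches it from the address written
 * to IWX_CSR_CTXT_INFO_BA below.
 */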
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);
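
	/* Tell the device where the context info and image loader live. */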
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}

static void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF);
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

static int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    const uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
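
/*
 * Layout of a firmware section TLV as consumed by
 * iwx_firmware_store_section() above: a 32-bit device load offset
 * followed by the section payload.  Only a pointer into the firmware
 * blob is recorded; the payload itself is not copied.
 *
 *	[ fws_devoff (32 bits) | fws_len bytes of image data ]
 */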
#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

static int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	free(fw->iml, M_DEVBUF);
	fw->iml = NULL;
	fw->iml_len = 0;
}

#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		break;

	case IWX_UCODE_TLV_N_SCAN_CHANNELS:
		if (tlv_len != sizeof(uint32_t)) {
			err = EINVAL;
			goto parse_out;
		}
		sc->sc_capa_n_scan_channels =
		    le32toh(*(const uint32_t *)tlv_data);
		if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
			err = ERANGE;
			goto parse_out;
		}
		break;

	case IWX_UCODE_TLV_FW_VERSION:
		if (tlv_len != sizeof(uint32_t) * 3) {
			err = EINVAL;
			goto parse_out;
		}

		iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
		    le32toh(((const uint32_t *)tlv_data)[0]),
		    le32toh(((const uint32_t *)tlv_data)[1]),
		    le32toh(((const uint32_t *)tlv_data)[2]));
		break;

	case IWX_UCODE_TLV_FW_DBG_DEST: {
		const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

		fw->dbg_dest_ver = (const uint8_t *)tlv_data;
		if (*fw->dbg_dest_ver != 0) {
			err = EINVAL;
			goto parse_out;
		}

		if (fw->dbg_dest_tlv_init)
			break;
		fw->dbg_dest_tlv_init = true;

		dest_v1 = (const void *)tlv_data;
		fw->dbg_dest_tlv_v1 = dest_v1;
		fw->n_dest_reg = tlv_len -
		    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
		fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: found debug dest; n_dest_reg=%d\n",
		    __func__, fw->n_dest_reg);
		break;
	}

	case IWX_UCODE_TLV_FW_DBG_CONF: {
		const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;

		if (!fw->dbg_dest_tlv_init ||
		    conf->id >= nitems(fw->dbg_conf_tlv) ||
		    fw->dbg_conf_tlv[conf->id] != NULL)
			break;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "Found debug configuration: %d\n", conf->id);
		fw->dbg_conf_tlv[conf->id] = conf;
		fw->dbg_conf_tlv_len[conf->id] = tlv_len;
		break;
	}

	case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
		const struct iwx_umac_debug_addrs *dbg_ptrs =
		    (const void *)tlv_data;

		if (tlv_len != sizeof(*dbg_ptrs)) {
			err = EINVAL;
			goto parse_out;
		}
		if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
			break;
		sc->sc_uc.uc_umac_error_event_table =
		    le32toh(dbg_ptrs->error_info_addr) &
		    ~IWX_FW_ADDR_CACHE_CONTROL;
		sc->sc_uc.error_event_table_tlv_status |=
		    IWX_ERROR_EVENT_TABLE_UMAC;
		break;
	}

	case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
		const struct iwx_lmac_debug_addrs *dbg_ptrs =
		    (const void *)tlv_data;

		if (tlv_len != sizeof(*dbg_ptrs)) {
			err = EINVAL;
			goto parse_out;
		}
		if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
			break;
		sc->sc_uc.uc_lmac_error_event_table[0] =
		    le32toh(dbg_ptrs->error_event_table_ptr) &
		    ~IWX_FW_ADDR_CACHE_CONTROL;
		sc->sc_uc.error_event_table_tlv_status |=
		    IWX_ERROR_EVENT_TABLE_LMAC1;
		break;
	}

	case IWX_UCODE_TLV_FW_MEM_SEG:
		break;

	case IWX_UCODE_TLV_IML:
		if (sc->sc_fw.iml != NULL) {
			free(fw->iml, M_DEVBUF);
			fw->iml_len = 0;
		}
		sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
		    M_WAITOK | M_ZERO);
		if (sc->sc_fw.iml == NULL) {
			err = ENOMEM;
			goto parse_out;
		}
		memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
		sc->sc_fw.iml_len = tlv_len;
		break;

	case IWX_UCODE_TLV_CMD_VERSIONS:
		if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
			tlv_len /= sizeof(struct iwx_fw_cmd_version);
			tlv_len *= sizeof(struct iwx_fw_cmd_version);
		}
		if (sc->n_cmd_versions != 0) {
			err = EINVAL;
1671 goto parse_out; 1672 } 1673 if (tlv_len > sizeof(sc->cmd_versions)) { 1674 err = EINVAL; 1675 goto parse_out; 1676 } 1677 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len); 1678 sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version); 1679 break; 1680 1681 case IWX_UCODE_TLV_FW_RECOVERY_INFO: 1682 break; 1683 1684 case IWX_UCODE_TLV_FW_FSEQ_VERSION: 1685 case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION: 1686 case IWX_UCODE_TLV_FW_NUM_STATIONS: 1687 case IWX_UCODE_TLV_FW_NUM_BEACONS: 1688 break; 1689 1690 /* undocumented TLVs found in iwx-cc-a0-46 image */ 1691 case 58: 1692 case 0x1000003: 1693 case 0x1000004: 1694 break; 1695 1696 /* undocumented TLVs found in iwx-cc-a0-48 image */ 1697 case 0x1000000: 1698 case 0x1000002: 1699 break; 1700 1701 case IWX_UCODE_TLV_TYPE_DEBUG_INFO: 1702 case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION: 1703 case IWX_UCODE_TLV_TYPE_HCMD: 1704 case IWX_UCODE_TLV_TYPE_REGIONS: 1705 case IWX_UCODE_TLV_TYPE_TRIGGERS: 1706 case IWX_UCODE_TLV_TYPE_CONF_SET: 1707 case IWX_UCODE_TLV_SEC_TABLE_ADDR: 1708 case IWX_UCODE_TLV_D3_KEK_KCK_ADDR: 1709 case IWX_UCODE_TLV_CURRENT_PC: 1710 break; 1711 1712 /* undocumented TLV found in iwx-cc-a0-67 image */ 1713 case 0x100000b: 1714 break; 1715 1716 /* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */ 1717 case 0x101: 1718 break; 1719 1720 /* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */ 1721 case 0x100000c: 1722 break; 1723 1724 /* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */ 1725 case 69: 1726 break; 1727 1728 default: 1729 err = EINVAL; 1730 goto parse_out; 1731 } 1732 1733 /* 1734 * Check for size_t overflow and ignore missing padding at 1735 * end of firmware file. 1736 */ 1737 if (roundup(tlv_len, 4) > len) 1738 break; 1739 1740 len -= roundup(tlv_len, 4); 1741 data += roundup(tlv_len, 4); 1742 } 1743 1744 KASSERT(err == 0, ("unhandled fw parse error")); 1745 1746 parse_out: 1747 if (err) { 1748 printf("%s: firmware parse error %d, " 1749 "section type %d\n", DEVNAME(sc), err, tlv_type); 1750 } 1751 1752 out: 1753 if (err) { 1754 fw->fw_status = IWX_FW_STATUS_NONE; 1755 if (fw->fw_rawdata != NULL) 1756 iwx_fw_info_free(fw); 1757 } else 1758 fw->fw_status = IWX_FW_STATUS_DONE; 1759 return err; 1760 } 1761 1762 static uint32_t 1763 iwx_prph_addr_mask(struct iwx_softc *sc) 1764 { 1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 1766 return 0x00ffffff; 1767 else 1768 return 0x000fffff; 1769 } 1770 1771 static uint32_t 1772 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr) 1773 { 1774 uint32_t mask = iwx_prph_addr_mask(sc); 1775 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24))); 1776 IWX_BARRIER_READ_WRITE(sc); 1777 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT); 1778 } 1779 1780 uint32_t 1781 iwx_read_prph(struct iwx_softc *sc, uint32_t addr) 1782 { 1783 iwx_nic_assert_locked(sc); 1784 return iwx_read_prph_unlocked(sc, addr); 1785 } 1786 1787 static void 1788 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val) 1789 { 1790 uint32_t mask = iwx_prph_addr_mask(sc); 1791 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24))); 1792 IWX_BARRIER_WRITE(sc); 1793 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val); 1794 } 1795 1796 static void 1797 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val) 1798 { 1799 iwx_nic_assert_locked(sc); 1800 iwx_write_prph_unlocked(sc, addr, val); 1801 } 1802 1803 static uint32_t 1804 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr) 1805 { 1806 return iwx_read_prph(sc, addr + 
sc->sc_umac_prph_offset); 1807 } 1808 1809 static void 1810 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val) 1811 { 1812 iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val); 1813 } 1814 1815 static int 1816 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords) 1817 { 1818 int offs, err = 0; 1819 uint32_t *vals = buf; 1820 1821 if (iwx_nic_lock(sc)) { 1822 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr); 1823 for (offs = 0; offs < dwords; offs++) 1824 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT)); 1825 iwx_nic_unlock(sc); 1826 } else { 1827 err = EBUSY; 1828 } 1829 return err; 1830 } 1831 1832 static int 1833 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask, 1834 int timo) 1835 { 1836 for (;;) { 1837 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) { 1838 return 1; 1839 } 1840 if (timo < 10) { 1841 return 0; 1842 } 1843 timo -= 10; 1844 DELAY(10); 1845 } 1846 } 1847 1848 static int 1849 iwx_nic_lock(struct iwx_softc *sc) 1850 { 1851 if (sc->sc_nic_locks > 0) { 1852 iwx_nic_assert_locked(sc); 1853 sc->sc_nic_locks++; 1854 return 1; /* already locked */ 1855 } 1856 1857 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, 1858 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1859 1860 DELAY(2); 1861 1862 if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL, 1863 IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1864 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY 1865 | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) { 1866 sc->sc_nic_locks++; 1867 return 1; 1868 } 1869 1870 printf("%s: acquiring device failed\n", DEVNAME(sc)); 1871 return 0; 1872 } 1873 1874 static void 1875 iwx_nic_assert_locked(struct iwx_softc *sc) 1876 { 1877 if (sc->sc_nic_locks <= 0) 1878 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks); 1879 } 1880 1881 static void 1882 iwx_nic_unlock(struct iwx_softc *sc) 1883 { 1884 if (sc->sc_nic_locks > 0) { 1885 if (--sc->sc_nic_locks == 0) 1886 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL, 1887 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1888 } else 1889 printf("%s: NIC already unlocked\n", DEVNAME(sc)); 1890 } 1891 1892 static int 1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits, 1894 uint32_t mask) 1895 { 1896 uint32_t val; 1897 1898 if (iwx_nic_lock(sc)) { 1899 val = iwx_read_prph(sc, reg) & mask; 1900 val |= bits; 1901 iwx_write_prph(sc, reg, val); 1902 iwx_nic_unlock(sc); 1903 return 0; 1904 } 1905 return EBUSY; 1906 } 1907 1908 static int 1909 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits) 1910 { 1911 return iwx_set_bits_mask_prph(sc, reg, bits, ~0); 1912 } 1913 1914 static int 1915 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits) 1916 { 1917 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits); 1918 } 1919 1920 static void 1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1922 { 1923 if (error != 0) 1924 return; 1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1926 *(bus_addr_t *)arg = segs[0].ds_addr; 1927 } 1928 1929 static int 1930 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma, 1931 bus_size_t size, bus_size_t alignment) 1932 { 1933 int error; 1934 1935 dma->tag = NULL; 1936 dma->map = NULL; 1937 dma->size = size; 1938 dma->vaddr = NULL; 1939 1940 error = bus_dma_tag_create(tag, alignment, 1941 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1942 1, size, 0, NULL, NULL, &dma->tag); 1943 if (error != 0) 1944 goto fail; 1945 1946 error = bus_dmamem_alloc(dma->tag, (void 
**)&dma->vaddr, 1947 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1948 if (error != 0) 1949 goto fail; 1950 1951 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1952 iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1953 if (error != 0) { 1954 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1955 dma->vaddr = NULL; 1956 goto fail; 1957 } 1958 1959 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1960 1961 return 0; 1962 1963 fail: 1964 iwx_dma_contig_free(dma); 1965 return error; 1966 } 1967 1968 static void 1969 iwx_dma_contig_free(struct iwx_dma_info *dma) 1970 { 1971 if (dma->vaddr != NULL) { 1972 bus_dmamap_sync(dma->tag, dma->map, 1973 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1974 bus_dmamap_unload(dma->tag, dma->map); 1975 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1976 dma->vaddr = NULL; 1977 } 1978 if (dma->tag != NULL) { 1979 bus_dma_tag_destroy(dma->tag); 1980 dma->tag = NULL; 1981 } 1982 } 1983 1984 static int 1985 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring) 1986 { 1987 bus_size_t size; 1988 int i, err; 1989 1990 ring->cur = 0; 1991 1992 /* Allocate RX descriptors (256-byte aligned). */ 1993 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 1994 size = sizeof(struct iwx_rx_transfer_desc); 1995 else 1996 size = sizeof(uint64_t); 1997 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, 1998 size * IWX_RX_MQ_RING_COUNT, 256); 1999 if (err) { 2000 device_printf(sc->sc_dev, 2001 "could not allocate RX ring DMA memory\n"); 2002 goto fail; 2003 } 2004 ring->desc = ring->free_desc_dma.vaddr; 2005 2006 /* Allocate RX status area (16-byte aligned). */ 2007 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 2008 size = sizeof(uint16_t); 2009 else 2010 size = sizeof(*ring->stat); 2011 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16); 2012 if (err) { 2013 device_printf(sc->sc_dev, 2014 "could not allocate RX status DMA memory\n"); 2015 goto fail; 2016 } 2017 ring->stat = ring->stat_dma.vaddr; 2018 2019 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 2020 size = sizeof(struct iwx_rx_completion_desc); 2021 else 2022 size = sizeof(uint32_t); 2023 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma, 2024 size * IWX_RX_MQ_RING_COUNT, 256); 2025 if (err) { 2026 device_printf(sc->sc_dev, 2027 "could not allocate RX ring DMA memory\n"); 2028 goto fail; 2029 } 2030 2031 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, 2032 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE, 2033 0, NULL, NULL, &ring->data_dmat); 2034 2035 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) { 2036 struct iwx_rx_data *data = &ring->data[i]; 2037 2038 memset(data, 0, sizeof(*data)); 2039 err = bus_dmamap_create(ring->data_dmat, 0, &data->map); 2040 if (err) { 2041 device_printf(sc->sc_dev, 2042 "could not create RX buf DMA map\n"); 2043 goto fail; 2044 } 2045 2046 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i); 2047 if (err) 2048 goto fail; 2049 } 2050 return 0; 2051 2052 fail: iwx_free_rx_ring(sc, ring); 2053 return err; 2054 } 2055 2056 static void 2057 iwx_disable_rx_dma(struct iwx_softc *sc) 2058 { 2059 int ntries; 2060 2061 if (iwx_nic_lock(sc)) { 2062 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 2063 iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0); 2064 else 2065 iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0); 2066 for (ntries = 0; ntries < 1000; ntries++) { 2067 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 2068 if (iwx_read_umac_prph(sc, 2069 
IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE) 2070 break; 2071 } else { 2072 if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) & 2073 IWX_RXF_DMA_IDLE) 2074 break; 2075 } 2076 DELAY(10); 2077 } 2078 iwx_nic_unlock(sc); 2079 } 2080 } 2081 2082 static void 2083 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring) 2084 { 2085 ring->cur = 0; 2086 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 2087 BUS_DMASYNC_PREWRITE); 2088 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 2089 uint16_t *status = sc->rxq.stat_dma.vaddr; 2090 *status = 0; 2091 } else 2092 memset(ring->stat, 0, sizeof(*ring->stat)); 2093 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 2094 BUS_DMASYNC_POSTWRITE); 2095 2096 } 2097 2098 static void 2099 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring) 2100 { 2101 int i; 2102 2103 iwx_dma_contig_free(&ring->free_desc_dma); 2104 iwx_dma_contig_free(&ring->stat_dma); 2105 iwx_dma_contig_free(&ring->used_desc_dma); 2106 2107 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) { 2108 struct iwx_rx_data *data = &ring->data[i]; 2109 if (data->m != NULL) { 2110 bus_dmamap_sync(ring->data_dmat, data->map, 2111 BUS_DMASYNC_POSTREAD); 2112 bus_dmamap_unload(ring->data_dmat, data->map); 2113 m_freem(data->m); 2114 data->m = NULL; 2115 } 2116 if (data->map != NULL) { 2117 bus_dmamap_destroy(ring->data_dmat, data->map); 2118 data->map = NULL; 2119 } 2120 } 2121 if (ring->data_dmat != NULL) { 2122 bus_dma_tag_destroy(ring->data_dmat); 2123 ring->data_dmat = NULL; 2124 } 2125 } 2126 2127 static int 2128 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid) 2129 { 2130 bus_addr_t paddr; 2131 bus_size_t size; 2132 int i, err; 2133 size_t bc_tbl_size; 2134 bus_size_t bc_align; 2135 size_t mapsize; 2136 2137 ring->qid = qid; 2138 ring->queued = 0; 2139 ring->cur = 0; 2140 ring->cur_hw = 0; 2141 ring->tail = 0; 2142 ring->tail_hw = 0; 2143 2144 /* Allocate TX descriptors (256-byte aligned). */ 2145 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd); 2146 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); 2147 if (err) { 2148 device_printf(sc->sc_dev, 2149 "could not allocate TX ring DMA memory\n"); 2150 goto fail; 2151 } 2152 ring->desc = ring->desc_dma.vaddr; 2153 2154 /* 2155 * The hardware supports up to 512 Tx rings which is more 2156 * than we currently need. 2157 * 2158 * In DQA mode we use 1 command queue + 1 default queue for 2159 * management, control, and non-QoS data frames. 2160 * The command is queue sc->txq[0], our default queue is sc->txq[1]. 2161 * 2162 * Tx aggregation requires additional queues, one queue per TID for 2163 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9]. 2164 * Firmware may assign its own internal IDs for these queues 2165 * depending on which TID gets aggregation enabled first. 2166 * The driver maintains a table mapping driver-side queue IDs 2167 * to firmware-side queue IDs. 
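	 *
	 * For example, the first TID to have aggregation enabled is
	 * assigned the first queue past those already enabled
	 * (sc->txq[2]), and the queue number actually chosen is
	 * remembered in sc->aggqid[tid]; see iwx_sta_tx_agg_start().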
2168 */ 2169 2170 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 2171 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) * 2172 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210; 2173 bc_align = 128; 2174 } else { 2175 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl); 2176 bc_align = 64; 2177 } 2178 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size, 2179 bc_align); 2180 if (err) { 2181 device_printf(sc->sc_dev, 2182 "could not allocate byte count table DMA memory\n"); 2183 goto fail; 2184 } 2185 2186 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd); 2187 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 2188 IWX_FIRST_TB_SIZE_ALIGN); 2189 if (err) { 2190 device_printf(sc->sc_dev, 2191 "could not allocate cmd DMA memory\n"); 2192 goto fail; 2193 } 2194 ring->cmd = ring->cmd_dma.vaddr; 2195 2196 /* FW commands may require more mapped space than packets. */ 2197 if (qid == IWX_DQA_CMD_QUEUE) 2198 mapsize = (sizeof(struct iwx_cmd_header) + 2199 IWX_MAX_CMD_PAYLOAD_SIZE); 2200 else 2201 mapsize = MCLBYTES; 2202 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, 2203 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2, 2204 mapsize, 0, NULL, NULL, &ring->data_dmat); 2205 2206 paddr = ring->cmd_dma.paddr; 2207 for (i = 0; i < IWX_TX_RING_COUNT; i++) { 2208 struct iwx_tx_data *data = &ring->data[i]; 2209 2210 data->cmd_paddr = paddr; 2211 paddr += sizeof(struct iwx_device_cmd); 2212 2213 err = bus_dmamap_create(ring->data_dmat, 0, &data->map); 2214 if (err) { 2215 device_printf(sc->sc_dev, 2216 "could not create TX buf DMA map\n"); 2217 goto fail; 2218 } 2219 } 2220 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc")); 2221 return 0; 2222 2223 fail: 2224 return err; 2225 } 2226 2227 static void 2228 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring) 2229 { 2230 int i; 2231 2232 for (i = 0; i < IWX_TX_RING_COUNT; i++) { 2233 struct iwx_tx_data *data = &ring->data[i]; 2234 2235 if (data->m != NULL) { 2236 bus_dmamap_sync(ring->data_dmat, data->map, 2237 BUS_DMASYNC_POSTWRITE); 2238 bus_dmamap_unload(ring->data_dmat, data->map); 2239 m_freem(data->m); 2240 data->m = NULL; 2241 } 2242 } 2243 2244 /* Clear byte count table. */ 2245 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size); 2246 2247 /* Clear TX descriptors. 
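	 * Stale TFDs must not be picked up again if this queue is
	 * later re-enabled.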
*/ 2248 memset(ring->desc, 0, ring->desc_dma.size); 2249 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2250 BUS_DMASYNC_PREWRITE); 2251 sc->qfullmsk &= ~(1 << ring->qid); 2252 sc->qenablemsk &= ~(1 << ring->qid); 2253 for (i = 0; i < nitems(sc->aggqid); i++) { 2254 if (sc->aggqid[i] == ring->qid) { 2255 sc->aggqid[i] = 0; 2256 break; 2257 } 2258 } 2259 ring->queued = 0; 2260 ring->cur = 0; 2261 ring->cur_hw = 0; 2262 ring->tail = 0; 2263 ring->tail_hw = 0; 2264 ring->tid = 0; 2265 } 2266 2267 static void 2268 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring) 2269 { 2270 int i; 2271 2272 iwx_dma_contig_free(&ring->desc_dma); 2273 iwx_dma_contig_free(&ring->cmd_dma); 2274 iwx_dma_contig_free(&ring->bc_tbl); 2275 2276 for (i = 0; i < IWX_TX_RING_COUNT; i++) { 2277 struct iwx_tx_data *data = &ring->data[i]; 2278 2279 if (data->m != NULL) { 2280 bus_dmamap_sync(ring->data_dmat, data->map, 2281 BUS_DMASYNC_POSTWRITE); 2282 bus_dmamap_unload(ring->data_dmat, data->map); 2283 m_freem(data->m); 2284 data->m = NULL; 2285 } 2286 if (data->map != NULL) { 2287 bus_dmamap_destroy(ring->data_dmat, data->map); 2288 data->map = NULL; 2289 } 2290 } 2291 if (ring->data_dmat != NULL) { 2292 bus_dma_tag_destroy(ring->data_dmat); 2293 ring->data_dmat = NULL; 2294 } 2295 } 2296 2297 static void 2298 iwx_enable_rfkill_int(struct iwx_softc *sc) 2299 { 2300 if (!sc->sc_msix) { 2301 sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL; 2302 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2303 } else { 2304 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2305 sc->sc_fh_init_mask); 2306 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2307 ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL); 2308 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL; 2309 } 2310 2311 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, 2312 IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN); 2313 } 2314 2315 static int 2316 iwx_check_rfkill(struct iwx_softc *sc) 2317 { 2318 uint32_t v; 2319 int rv; 2320 2321 /* 2322 * "documentation" is not really helpful here: 2323 * 27: HW_RF_KILL_SW 2324 * Indicates state of (platform's) hardware RF-Kill switch 2325 * 2326 * But apparently when it's off, it's on ... 2327 */ 2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL); 2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0; 2330 if (rv) { 2331 sc->sc_flags |= IWX_FLAG_RFKILL; 2332 } else { 2333 sc->sc_flags &= ~IWX_FLAG_RFKILL; 2334 } 2335 2336 return rv; 2337 } 2338 2339 static void 2340 iwx_enable_interrupts(struct iwx_softc *sc) 2341 { 2342 if (!sc->sc_msix) { 2343 sc->sc_intmask = IWX_CSR_INI_SET_MASK; 2344 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2345 } else { 2346 /* 2347 * fh/hw_mask keeps all the unmasked causes. 2348 * Unlike msi, in msix cause is enabled when it is unset. 2349 */ 2350 sc->sc_hw_mask = sc->sc_hw_init_mask; 2351 sc->sc_fh_mask = sc->sc_fh_init_mask; 2352 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2353 ~sc->sc_fh_mask); 2354 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2355 ~sc->sc_hw_mask); 2356 } 2357 } 2358 2359 static void 2360 iwx_enable_fwload_interrupt(struct iwx_softc *sc) 2361 { 2362 if (!sc->sc_msix) { 2363 sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX; 2364 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2365 } else { 2366 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2367 ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE); 2368 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE; 2369 /* 2370 * Leave all the FH causes enabled to get the ALIVE 2371 * notification. 
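		 * The ALIVE notification arrives as an RX completion,
		 * i.e. through an FH cause rather than a HW cause.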
2372 */ 2373 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2374 ~sc->sc_fh_init_mask); 2375 sc->sc_fh_mask = sc->sc_fh_init_mask; 2376 } 2377 } 2378 2379 #if 0 2380 static void 2381 iwx_restore_interrupts(struct iwx_softc *sc) 2382 { 2383 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask); 2384 } 2385 #endif 2386 2387 static void 2388 iwx_disable_interrupts(struct iwx_softc *sc) 2389 { 2390 if (!sc->sc_msix) { 2391 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0); 2392 2393 /* acknowledge all interrupts */ 2394 IWX_WRITE(sc, IWX_CSR_INT, ~0); 2395 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0); 2396 } else { 2397 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2398 sc->sc_fh_init_mask); 2399 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2400 sc->sc_hw_init_mask); 2401 } 2402 } 2403 2404 static void 2405 iwx_ict_reset(struct iwx_softc *sc) 2406 { 2407 iwx_disable_interrupts(sc); 2408 2409 memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE); 2410 sc->ict_cur = 0; 2411 2412 /* Set physical address of ICT (4KB aligned). */ 2413 IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG, 2414 IWX_CSR_DRAM_INT_TBL_ENABLE 2415 | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK 2416 | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER 2417 | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT); 2418 2419 /* Switch to ICT interrupt mode in driver. */ 2420 sc->sc_flags |= IWX_FLAG_USE_ICT; 2421 2422 IWX_WRITE(sc, IWX_CSR_INT, ~0); 2423 iwx_enable_interrupts(sc); 2424 } 2425 2426 #define IWX_HW_READY_TIMEOUT 50 2427 static int 2428 iwx_set_hw_ready(struct iwx_softc *sc) 2429 { 2430 int ready; 2431 2432 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2433 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 2434 2435 ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG, 2436 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 2437 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 2438 IWX_HW_READY_TIMEOUT); 2439 if (ready) 2440 IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG, 2441 IWX_CSR_MBOX_SET_REG_OS_ALIVE); 2442 2443 DPRINTF(("%s: ready=%d\n", __func__, ready)); 2444 return ready; 2445 } 2446 #undef IWX_HW_READY_TIMEOUT 2447 2448 static int 2449 iwx_prepare_card_hw(struct iwx_softc *sc) 2450 { 2451 int t = 0; 2452 int ntries; 2453 2454 if (iwx_set_hw_ready(sc)) 2455 return 0; 2456 2457 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG, 2458 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED); 2459 DELAY(1000); 2460 2461 for (ntries = 0; ntries < 10; ntries++) { 2462 /* If HW is not ready, prepare the conditions to check again */ 2463 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2464 IWX_CSR_HW_IF_CONFIG_REG_PREPARE); 2465 2466 do { 2467 if (iwx_set_hw_ready(sc)) 2468 return 0; 2469 DELAY(200); 2470 t += 200; 2471 } while (t < 150000); 2472 DELAY(25000); 2473 } 2474 2475 return ETIMEDOUT; 2476 } 2477 2478 static int 2479 iwx_force_power_gating(struct iwx_softc *sc) 2480 { 2481 int err; 2482 2483 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG, 2484 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); 2485 if (err) 2486 return err; 2487 DELAY(20); 2488 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG, 2489 IWX_HPM_HIPM_GEN_CFG_CR_PG_EN | 2490 IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN); 2491 if (err) 2492 return err; 2493 DELAY(20); 2494 err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG, 2495 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); 2496 return err; 2497 } 2498 2499 static void 2500 iwx_apm_config(struct iwx_softc *sc) 2501 { 2502 uint16_t lctl, cap; 2503 int pcie_ptr; 2504 int error; 2505 2506 /* 2507 * L0S states have been found to be unstable with our devices 2508 * and in newer hardware they are not officially supported at 2509 * all, so we must always set the L0S_DISABLED bit. 
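	 * The PCIe registers below are only read so that the ASPM and
	 * LTR configuration can be recorded; the driver does not
	 * change them.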
2510 */ 2511 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED); 2512 2513 error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr); 2514 if (error != 0) { 2515 printf("can't fill pcie_ptr\n"); 2516 return; 2517 } 2518 2519 lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL, 2520 sizeof(lctl)); 2521 #define PCI_PCIE_LCSR_ASPM_L0S 0x00000001 2522 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S); 2523 #define PCI_PCIE_DCSR2 0x28 2524 cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2, 2525 sizeof(lctl)); 2526 #define PCI_PCIE_DCSR2_LTREN 0x00000400 2527 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0; 2528 #define PCI_PCIE_LCSR_ASPM_L1 0x00000002 2529 DPRINTF(("%s: L1 %sabled - LTR %sabled\n", 2530 DEVNAME(sc), 2531 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis", 2532 sc->sc_ltr_enabled ? "En" : "Dis")); 2533 #undef PCI_PCIE_LCSR_ASPM_L0S 2534 #undef PCI_PCIE_DCSR2 2535 #undef PCI_PCIE_DCSR2_LTREN 2536 #undef PCI_PCIE_LCSR_ASPM_L1 2537 } 2538 2539 /* 2540 * Start up NIC's basic functionality after it has been reset 2541 * e.g. after platform boot or shutdown. 2542 * NOTE: This does not load uCode nor start the embedded processor 2543 */ 2544 static int 2545 iwx_apm_init(struct iwx_softc *sc) 2546 { 2547 int err = 0; 2548 2549 /* 2550 * Disable L0s without affecting L1; 2551 * don't wait for ICH L0s (ICH bug W/A) 2552 */ 2553 IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS, 2554 IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 2555 2556 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 2557 IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL); 2558 2559 /* 2560 * Enable HAP INTA (interrupt from management bus) to 2561 * wake device's PCI Express link L1a -> L0s 2562 */ 2563 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2564 IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 2565 2566 iwx_apm_config(sc); 2567 2568 /* 2569 * Set "initialization complete" bit to move adapter from 2570 * D0U* --> D0A* (powered-up active) state. 2571 */ 2572 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2573 2574 /* 2575 * Wait for clock stabilization; once stabilized, access to 2576 * device-internal resources is supported, e.g. iwx_write_prph() 2577 * and accesses to uCode SRAM. 
2578 */ 2579 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL, 2580 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2581 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) { 2582 printf("%s: timeout waiting for clock stabilization\n", 2583 DEVNAME(sc)); 2584 err = ETIMEDOUT; 2585 goto out; 2586 } 2587 out: 2588 if (err) 2589 printf("%s: apm init error %d\n", DEVNAME(sc), err); 2590 return err; 2591 } 2592 2593 static void 2594 iwx_apm_stop(struct iwx_softc *sc) 2595 { 2596 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG, 2597 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED); 2598 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG, 2599 IWX_CSR_HW_IF_CONFIG_REG_PREPARE | 2600 IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME); 2601 DELAY(1000); 2602 IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG, 2603 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED); 2604 DELAY(5000); 2605 2606 /* stop device's busmaster DMA activity */ 2607 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER); 2608 2609 if (!iwx_poll_bit(sc, IWX_CSR_RESET, 2610 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 2611 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100)) 2612 printf("%s: timeout waiting for bus master\n", DEVNAME(sc)); 2613 2614 /* 2615 * Clear "initialization complete" bit to move adapter from 2616 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 2617 */ 2618 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL, 2619 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2620 } 2621 2622 static void 2623 iwx_init_msix_hw(struct iwx_softc *sc) 2624 { 2625 iwx_conf_msix_hw(sc, 0); 2626 2627 if (!sc->sc_msix) 2628 return; 2629 2630 sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD); 2631 sc->sc_fh_mask = sc->sc_fh_init_mask; 2632 sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD); 2633 sc->sc_hw_mask = sc->sc_hw_init_mask; 2634 } 2635 2636 static void 2637 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped) 2638 { 2639 int vector = 0; 2640 2641 if (!sc->sc_msix) { 2642 /* Newer chips default to MSIX. 
*/ 2643 if (!stopped && iwx_nic_lock(sc)) { 2644 iwx_write_umac_prph(sc, IWX_UREG_CHICK, 2645 IWX_UREG_CHICK_MSI_ENABLE); 2646 iwx_nic_unlock(sc); 2647 } 2648 return; 2649 } 2650 2651 if (!stopped && iwx_nic_lock(sc)) { 2652 iwx_write_umac_prph(sc, IWX_UREG_CHICK, 2653 IWX_UREG_CHICK_MSIX_ENABLE); 2654 iwx_nic_unlock(sc); 2655 } 2656 2657 /* Disable all interrupts */ 2658 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0); 2659 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0); 2660 2661 /* Map fallback-queue (command/mgmt) to a single vector */ 2662 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0), 2663 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2664 /* Map RSS queue (data) to the same vector */ 2665 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1), 2666 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2667 2668 /* Enable the RX queues cause interrupts */ 2669 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2670 IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1); 2671 2672 /* Map non-RX causes to the same vector */ 2673 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM), 2674 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2675 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM), 2676 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2677 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D), 2678 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2679 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR), 2680 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2681 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE), 2682 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2683 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP), 2684 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2685 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE), 2686 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2687 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL), 2688 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2689 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL), 2690 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2691 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC), 2692 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2693 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR), 2694 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2695 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD), 2696 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2697 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX), 2698 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2699 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR), 2700 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2701 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP), 2702 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); 2703 2704 /* Enable non-RX causes interrupts */ 2705 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, 2706 IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | 2707 IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | 2708 IWX_MSIX_FH_INT_CAUSES_S2D | 2709 IWX_MSIX_FH_INT_CAUSES_FH_ERR); 2710 IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, 2711 IWX_MSIX_HW_INT_CAUSES_REG_ALIVE | 2712 IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP | 2713 IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE | 2714 IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL | 2715 IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL | 2716 IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC | 2717 IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR | 2718 IWX_MSIX_HW_INT_CAUSES_REG_SCD | 2719 IWX_MSIX_HW_INT_CAUSES_REG_FH_TX | 2720 IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR | 2721 IWX_MSIX_HW_INT_CAUSES_REG_HAP); 2722 } 2723 2724 static int 2725 
iwx_clear_persistence_bit(struct iwx_softc *sc) 2726 { 2727 uint32_t hpm, wprot; 2728 2729 hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG); 2730 if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) { 2731 wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000); 2732 if (wprot & IWX_PREG_WFPM_ACCESS) { 2733 printf("%s: cannot clear persistence bit\n", 2734 DEVNAME(sc)); 2735 return EPERM; 2736 } 2737 iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG, 2738 hpm & ~IWX_PERSISTENCE_BIT); 2739 } 2740 2741 return 0; 2742 } 2743 2744 static int 2745 iwx_start_hw(struct iwx_softc *sc) 2746 { 2747 int err; 2748 2749 err = iwx_prepare_card_hw(sc); 2750 if (err) 2751 return err; 2752 2753 if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) { 2754 err = iwx_clear_persistence_bit(sc); 2755 if (err) 2756 return err; 2757 } 2758 2759 /* Reset the entire device */ 2760 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET); 2761 DELAY(5000); 2762 2763 if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 && 2764 sc->sc_integrated) { 2765 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, 2766 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2767 DELAY(20); 2768 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL, 2769 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2770 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) { 2771 printf("%s: timeout waiting for clock stabilization\n", 2772 DEVNAME(sc)); 2773 return ETIMEDOUT; 2774 } 2775 2776 err = iwx_force_power_gating(sc); 2777 if (err) 2778 return err; 2779 2780 /* Reset the entire device */ 2781 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET); 2782 DELAY(5000); 2783 } 2784 2785 err = iwx_apm_init(sc); 2786 if (err) 2787 return err; 2788 2789 iwx_init_msix_hw(sc); 2790 2791 iwx_enable_rfkill_int(sc); 2792 iwx_check_rfkill(sc); 2793 2794 return 0; 2795 } 2796 2797 static void 2798 iwx_stop_device(struct iwx_softc *sc) 2799 { 2800 int i; 2801 2802 iwx_disable_interrupts(sc); 2803 sc->sc_flags &= ~IWX_FLAG_USE_ICT; 2804 2805 iwx_disable_rx_dma(sc); 2806 iwx_reset_rx_ring(sc, &sc->rxq); 2807 for (i = 0; i < nitems(sc->txq); i++) 2808 iwx_reset_tx_ring(sc, &sc->txq[i]); 2809 #if 0 2810 /* XXX-THJ: Tidy up BA state on stop */ 2811 for (i = 0; i < IEEE80211_NUM_TID; i++) { 2812 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i]; 2813 if (ba->ba_state != IEEE80211_BA_AGREED) 2814 continue; 2815 ieee80211_delba_request(ic, ni, 0, 1, i); 2816 } 2817 #endif 2818 /* Make sure (redundant) we've released our request to stay awake */ 2819 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL, 2820 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2821 if (sc->sc_nic_locks > 0) 2822 printf("%s: %d active NIC locks forcefully cleared\n", 2823 DEVNAME(sc), sc->sc_nic_locks); 2824 sc->sc_nic_locks = 0; 2825 2826 /* Stop the device, and put it in low power state */ 2827 iwx_apm_stop(sc); 2828 2829 /* Reset the on-board processor. */ 2830 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET); 2831 DELAY(5000); 2832 2833 /* 2834 * Upon stop, the IVAR table gets erased, so msi-x won't 2835 * work. This causes a bug in RF-KILL flows, since the interrupt 2836 * that enables radio won't fire on the correct irq, and the 2837 * driver won't be able to handle the interrupt. 2838 * Configure the IVAR table again after reset. 2839 */ 2840 iwx_conf_msix_hw(sc, 1); 2841 2842 /* 2843 * Upon stop, the APM issues an interrupt if HW RF kill is set. 2844 * Clear the interrupt again. 2845 */ 2846 iwx_disable_interrupts(sc); 2847 2848 /* Even though we stop the HW we still want the RF kill interrupt. 
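	 * This lets the driver notice rfkill switch changes while the
	 * device is down.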
*/ 2849 iwx_enable_rfkill_int(sc); 2850 iwx_check_rfkill(sc); 2851 2852 iwx_prepare_card_hw(sc); 2853 2854 iwx_ctxt_info_free_paging(sc); 2855 iwx_dma_contig_free(&sc->pnvm_dma); 2856 } 2857 2858 static void 2859 iwx_nic_config(struct iwx_softc *sc) 2860 { 2861 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash; 2862 uint32_t mask, val, reg_val = 0; 2863 2864 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >> 2865 IWX_FW_PHY_CFG_RADIO_TYPE_POS; 2866 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >> 2867 IWX_FW_PHY_CFG_RADIO_STEP_POS; 2868 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >> 2869 IWX_FW_PHY_CFG_RADIO_DASH_POS; 2870 2871 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) << 2872 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; 2873 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) << 2874 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; 2875 2876 /* radio configuration */ 2877 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; 2878 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; 2879 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; 2880 2881 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | 2882 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | 2883 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | 2884 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | 2885 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | 2886 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 2887 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI; 2888 2889 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG); 2890 val &= ~mask; 2891 val |= reg_val; 2892 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val); 2893 } 2894 2895 static int 2896 iwx_nic_rx_init(struct iwx_softc *sc) 2897 { 2898 IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF); 2899 2900 /* 2901 * We don't configure the RFH; the firmware will do that. 2902 * Rx descriptors are set when firmware sends an ALIVE interrupt. 2903 */ 2904 return 0; 2905 } 2906 2907 static int 2908 iwx_nic_init(struct iwx_softc *sc) 2909 { 2910 int err; 2911 2912 iwx_apm_init(sc); 2913 if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210) 2914 iwx_nic_config(sc); 2915 2916 err = iwx_nic_rx_init(sc); 2917 if (err) 2918 return err; 2919 2920 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff); 2921 2922 return 0; 2923 } 2924 2925 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. 
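 * Indexed by access category: BE, BK, VI, VO, matching net80211's
 * WME_AC_* order.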
*/ 2926 const uint8_t iwx_ac_to_tx_fifo[] = { 2927 IWX_GEN2_EDCA_TX_FIFO_BE, 2928 IWX_GEN2_EDCA_TX_FIFO_BK, 2929 IWX_GEN2_EDCA_TX_FIFO_VI, 2930 IWX_GEN2_EDCA_TX_FIFO_VO, 2931 }; 2932 2933 static int 2934 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid, 2935 int num_slots) 2936 { 2937 struct iwx_rx_packet *pkt; 2938 struct iwx_tx_queue_cfg_rsp *resp; 2939 struct iwx_tx_queue_cfg_cmd cmd_v0; 2940 struct iwx_scd_queue_cfg_cmd cmd_v3; 2941 struct iwx_host_cmd hcmd = { 2942 .flags = IWX_CMD_WANT_RESP, 2943 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp), 2944 }; 2945 struct iwx_tx_ring *ring = &sc->txq[qid]; 2946 int err, fwqid, cmd_ver; 2947 uint32_t wr_idx; 2948 size_t resp_len; 2949 2950 DPRINTF(("%s: tid=%i\n", __func__, tid)); 2951 DPRINTF(("%s: qid=%i\n", __func__, qid)); 2952 iwx_reset_tx_ring(sc, ring); 2953 2954 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 2955 IWX_SCD_QUEUE_CONFIG_CMD); 2956 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) { 2957 memset(&cmd_v0, 0, sizeof(cmd_v0)); 2958 cmd_v0.sta_id = sta_id; 2959 cmd_v0.tid = tid; 2960 cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE); 2961 cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots)); 2962 cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr); 2963 cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr); 2964 hcmd.id = IWX_SCD_QUEUE_CFG; 2965 hcmd.data[0] = &cmd_v0; 2966 hcmd.len[0] = sizeof(cmd_v0); 2967 } else if (cmd_ver == 3) { 2968 memset(&cmd_v3, 0, sizeof(cmd_v3)); 2969 cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD); 2970 cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr); 2971 cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr); 2972 cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots)); 2973 cmd_v3.u.add.flags = htole32(0); 2974 cmd_v3.u.add.sta_mask = htole32(1 << sta_id); 2975 cmd_v3.u.add.tid = tid; 2976 hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 2977 IWX_SCD_QUEUE_CONFIG_CMD); 2978 hcmd.data[0] = &cmd_v3; 2979 hcmd.len[0] = sizeof(cmd_v3); 2980 } else { 2981 printf("%s: unsupported SCD_QUEUE_CFG command version %d\n", 2982 DEVNAME(sc), cmd_ver); 2983 return ENOTSUP; 2984 } 2985 2986 err = iwx_send_cmd(sc, &hcmd); 2987 if (err) 2988 return err; 2989 2990 pkt = hcmd.resp_pkt; 2991 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) { 2992 err = EIO; 2993 goto out; 2994 } 2995 2996 resp_len = iwx_rx_packet_payload_len(pkt); 2997 if (resp_len != sizeof(*resp)) { 2998 err = EIO; 2999 goto out; 3000 } 3001 3002 resp = (void *)pkt->data; 3003 fwqid = le16toh(resp->queue_number); 3004 wr_idx = le16toh(resp->write_pointer); 3005 3006 /* Unlike iwlwifi, we do not support dynamic queue ID assignment. 
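	 * The firmware must return the queue number we asked for; a
	 * mismatch is treated as an error below.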
	 */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}

static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}

static void
iwx_post_alive(struct iwx_softc *sc)
{
	int txcmd_ver;

	iwx_ict_reset(sc);

	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
		sc->sc_rate_n_flags_version = 2;
	else
		sc->sc_rate_n_flags_version = 1;

	txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
}

static int
iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
    uint32_t duration_tu)
{
	struct iwx_session_prot_cmd cmd = {
		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
		    in->in_color)),
		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = htole32(duration_tu),
	};
	uint32_t cmd_id;
	int err;

	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (!err)
		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
	return err;
}

static void
iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_session_prot_cmd cmd = {
		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
		    in->in_color)),
		.action =
		    htole32(IWX_FW_CTXT_ACTION_REMOVE),
		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = 0,
	};
	uint32_t cmd_id;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
		return;

	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 */

static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc *sc)
{
	uint8_t tx_ant;

	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);

	if (sc->sc_nvm.valid_tx_ant)
		tx_ant &= sc->sc_nvm.valid_tx_ant;

	return tx_ant;
}

static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc *sc)
{
	uint8_t rx_ant;

	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);

	if (sc->sc_nvm.valid_rx_ant)
		rx_ant &= sc->sc_nvm.valid_rx_ant;

	return rx_ant;
}

static void
iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];
	const uint8_t *nvm_channels;
	uint32_t ch_flags;
	int ch_idx, nchan;

	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	/*
	 * 2.4GHz; 1-13: 11b/g channels.
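	 * Channel validity and bandwidth capability flags come from the
	 * firmware's NVM channel profile; entries without
	 * IWX_NVM_CHANNEL_VALID are skipped.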
	 */
	if (!data->sku_cap_band_24GHz_enable)
		goto band_5;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	for (ch_idx = 0;
	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
	    ch_idx++) {
		uint32_t nflags = 0;
		int cflags = 0;

		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		} else {
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
		}
		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}

band_5:
	/* 5GHz */
	if (!data->sku_cap_band_52GHz_enable)
		goto band_6;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11A);
	setbit(bands, IEEE80211_MODE_11NA);
	setbit(bands, IEEE80211_MODE_VHT_5GHZ);

	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
	    ch_idx++) {
		uint32_t nflags = 0;
		int cflags = 0;

		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		else
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);

		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;
		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT80;
		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT160;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}
band_6:
	/* 6GHz one day ...
*/ 3277 return; 3278 } 3279 3280 static int 3281 iwx_mimo_enabled(struct iwx_softc *sc) 3282 { 3283 3284 return !sc->sc_nvm.sku_cap_mimo_disable; 3285 } 3286 3287 static void 3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf, 3289 uint16_t ssn, uint16_t buf_size) 3290 { 3291 reorder_buf->head_sn = ssn; 3292 reorder_buf->num_stored = 0; 3293 reorder_buf->buf_size = buf_size; 3294 reorder_buf->last_amsdu = 0; 3295 reorder_buf->last_sub_index = 0; 3296 reorder_buf->removed = 0; 3297 reorder_buf->valid = 0; 3298 reorder_buf->consec_oldsn_drops = 0; 3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0; 3300 reorder_buf->consec_oldsn_prev_drop = 0; 3301 } 3302 3303 static void 3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba) 3305 { 3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf; 3307 3308 reorder_buf->removed = 1; 3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID; 3310 } 3311 3312 #define IWX_MAX_RX_BA_SESSIONS 16 3313 3314 static struct iwx_rxba_data * 3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid) 3316 { 3317 int i; 3318 3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) { 3320 if (sc->sc_rxba_data[i].baid == 3321 IWX_RX_REORDER_DATA_INVALID_BAID) 3322 continue; 3323 if (sc->sc_rxba_data[i].tid == tid) 3324 return &sc->sc_rxba_data[i]; 3325 } 3326 3327 return NULL; 3328 } 3329 3330 static int 3331 iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni, 3332 uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start, 3333 uint8_t *baid) 3334 { 3335 struct iwx_rx_baid_cfg_cmd cmd; 3336 uint32_t new_baid = 0; 3337 int err; 3338 3339 IWX_ASSERT_LOCKED(sc); 3340 3341 memset(&cmd, 0, sizeof(cmd)); 3342 3343 if (start) { 3344 cmd.action = IWX_RX_BAID_ACTION_ADD; 3345 cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID); 3346 cmd.alloc.tid = tid; 3347 cmd.alloc.ssn = htole16(ssn); 3348 cmd.alloc.win_size = htole16(winsize); 3349 } else { 3350 struct iwx_rxba_data *rxba; 3351 3352 rxba = iwx_find_rxba_data(sc, tid); 3353 if (rxba == NULL) 3354 return ENOENT; 3355 *baid = rxba->baid; 3356 3357 cmd.action = IWX_RX_BAID_ACTION_REMOVE; 3358 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 3359 IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) { 3360 cmd.remove_v1.baid = rxba->baid; 3361 } else { 3362 cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID); 3363 cmd.remove.tid = tid; 3364 } 3365 } 3366 3367 err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 3368 IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid); 3369 if (err) 3370 return err; 3371 3372 if (start) { 3373 if (new_baid >= nitems(sc->sc_rxba_data)) 3374 return ERANGE; 3375 *baid = new_baid; 3376 } 3377 3378 return 0; 3379 } 3380 3381 static void 3382 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid, 3383 uint16_t ssn, uint16_t winsize, int timeout_val, int start) 3384 { 3385 int err; 3386 struct iwx_rxba_data *rxba = NULL; 3387 uint8_t baid = 0; 3388 3389 if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) { 3390 return; 3391 } 3392 3393 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) { 3394 err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize, 3395 timeout_val, start, &baid); 3396 } else { 3397 panic("sta_rx_agg unsupported hw"); 3398 } 3399 if (err) { 3400 DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err)); 3401 return; 3402 } else 3403 DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__)); 3404 3405 rxba = &sc->sc_rxba_data[baid]; 3406 3407 /* 
Deaggregation is done in hardware. */ 3408 if (start) { 3409 if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) { 3410 return; 3411 } 3412 rxba->sta_id = IWX_STATION_ID; 3413 rxba->tid = tid; 3414 rxba->baid = baid; 3415 rxba->timeout = timeout_val; 3416 getmicrouptime(&rxba->last_rx); 3417 iwx_init_reorder_buffer(&rxba->reorder_buf, ssn, 3418 winsize); 3419 if (timeout_val != 0) { 3420 DPRINTF(("%s: timeout_val != 0\n", __func__)); 3421 return; 3422 } 3423 } else 3424 iwx_clear_reorder_buffer(sc, rxba); 3425 3426 if (start) { 3427 sc->sc_rx_ba_sessions++; 3428 } else if (sc->sc_rx_ba_sessions > 0) 3429 sc->sc_rx_ba_sessions--; 3430 } 3431 3432 static void 3433 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni, 3434 uint8_t tid) 3435 { 3436 int err, qid; 3437 3438 qid = sc->aggqid[tid]; 3439 if (qid == 0) { 3440 /* Firmware should pick the next unused Tx queue. */ 3441 qid = fls(sc->qenablemsk); 3442 } 3443 3444 DPRINTF(("%s: qid=%i\n", __func__, qid)); 3445 3446 /* 3447 * Simply enable the queue. 3448 * Firmware handles Tx Ba session setup and teardown. 3449 */ 3450 if ((sc->qenablemsk & (1 << qid)) == 0) { 3451 if (!iwx_nic_lock(sc)) { 3452 return; 3453 } 3454 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid, 3455 IWX_TX_RING_COUNT); 3456 iwx_nic_unlock(sc); 3457 if (err) { 3458 printf("%s: could not enable Tx queue %d " 3459 "(error %d)\n", DEVNAME(sc), qid, err); 3460 return; 3461 } 3462 } 3463 ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING; 3464 DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid)); 3465 sc->aggqid[tid] = qid; 3466 } 3467 3468 static void 3469 iwx_ba_rx_task(void *arg, int npending __unused) 3470 { 3471 struct iwx_softc *sc = arg; 3472 struct ieee80211com *ic = &sc->sc_ic; 3473 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3474 struct ieee80211_node *ni = vap->iv_bss; 3475 int tid; 3476 3477 IWX_LOCK(sc); 3478 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) { 3479 if (sc->sc_flags & IWX_FLAG_SHUTDOWN) 3480 break; 3481 if (sc->ba_rx.start_tidmask & (1 << tid)) { 3482 struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid]; 3483 DPRINTF(("%s: ba->ba_flags=%x\n", __func__, 3484 ba->ba_flags)); 3485 if (ba->ba_flags == IWX_BA_DONE) { 3486 DPRINTF(("%s: ampdu for tid %i already added\n", 3487 __func__, tid)); 3488 break; 3489 } 3490 3491 DPRINTF(("%s: ampdu rx start for tid %i\n", __func__, 3492 tid)); 3493 iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart, 3494 ba->ba_winsize, ba->ba_timeout_val, 1); 3495 sc->ba_rx.start_tidmask &= ~(1 << tid); 3496 ba->ba_flags = IWX_BA_DONE; 3497 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) { 3498 iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0); 3499 sc->ba_rx.stop_tidmask &= ~(1 << tid); 3500 } 3501 } 3502 IWX_UNLOCK(sc); 3503 } 3504 3505 static void 3506 iwx_ba_tx_task(void *arg, int npending __unused) 3507 { 3508 struct iwx_softc *sc = arg; 3509 struct ieee80211com *ic = &sc->sc_ic; 3510 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3511 struct ieee80211_node *ni = vap->iv_bss; 3512 int tid; 3513 3514 IWX_LOCK(sc); 3515 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) { 3516 if (sc->sc_flags & IWX_FLAG_SHUTDOWN) 3517 break; 3518 if (sc->ba_tx.start_tidmask & (1 << tid)) { 3519 DPRINTF(("%s: ampdu tx start for tid %i\n", __func__, 3520 tid)); 3521 iwx_sta_tx_agg_start(sc, ni, tid); 3522 sc->ba_tx.start_tidmask &= ~(1 << tid); 3523 sc->sc_flags |= IWX_FLAG_AMPDUTX; 3524 } 3525 } 3526 3527 IWX_UNLOCK(sc); 3528 } 3529 3530 static void 3531 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data 
*data) 3532 { 3533 uint32_t mac_addr0, mac_addr1; 3534 3535 memset(data->hw_addr, 0, sizeof(data->hw_addr)); 3536 3537 if (!iwx_nic_lock(sc)) 3538 return; 3539 3540 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc))); 3541 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc))); 3542 3543 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); 3544 3545 /* If OEM fused a valid address, use it instead of the one in OTP. */ 3546 if (iwx_is_valid_mac_addr(data->hw_addr)) { 3547 iwx_nic_unlock(sc); 3548 return; 3549 } 3550 3551 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc))); 3552 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc))); 3553 3554 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); 3555 3556 iwx_nic_unlock(sc); 3557 } 3558 3559 static int 3560 iwx_is_valid_mac_addr(const uint8_t *addr) 3561 { 3562 static const uint8_t reserved_mac[] = { 3563 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 3564 }; 3565 3566 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 && 3567 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 && 3568 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 && 3569 !ETHER_IS_MULTICAST(addr)); 3570 } 3571 3572 static void 3573 iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest) 3574 { 3575 const uint8_t *hw_addr; 3576 3577 hw_addr = (const uint8_t *)&mac_addr0; 3578 dest[0] = hw_addr[3]; 3579 dest[1] = hw_addr[2]; 3580 dest[2] = hw_addr[1]; 3581 dest[3] = hw_addr[0]; 3582 3583 hw_addr = (const uint8_t *)&mac_addr1; 3584 dest[4] = hw_addr[1]; 3585 dest[5] = hw_addr[0]; 3586 } 3587 3588 static int 3589 iwx_nvm_get(struct iwx_softc *sc) 3590 { 3591 struct iwx_nvm_get_info cmd = {}; 3592 struct iwx_nvm_data *nvm = &sc->sc_nvm; 3593 struct iwx_host_cmd hcmd = { 3594 .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL, 3595 .data = { &cmd, }, 3596 .len = { sizeof(cmd) }, 3597 .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP, 3598 IWX_NVM_GET_INFO) 3599 }; 3600 int err = 0; 3601 uint32_t mac_flags; 3602 /* 3603 * All the values in iwx_nvm_get_info_rsp v4 are the same as 3604 * in v3, except for the channel profile part of the 3605 * regulatory. So we can just access the new struct, with the 3606 * exception of the latter. 3607 */ 3608 struct iwx_nvm_get_info_rsp *rsp; 3609 struct iwx_nvm_get_info_rsp_v3 *rsp_v3; 3610 int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO); 3611 size_t resp_len = v4 ? 
sizeof(*rsp) : sizeof(*rsp_v3); 3612 3613 hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len; 3614 err = iwx_send_cmd(sc, &hcmd); 3615 if (err) { 3616 printf("%s: failed to send cmd (error %d)", __func__, err); 3617 return err; 3618 } 3619 3620 if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) { 3621 printf("%s: iwx_rx_packet_payload_len=%d\n", __func__, 3622 iwx_rx_packet_payload_len(hcmd.resp_pkt)); 3623 printf("%s: resp_len=%zu\n", __func__, resp_len); 3624 err = EIO; 3625 goto out; 3626 } 3627 3628 memset(nvm, 0, sizeof(*nvm)); 3629 3630 iwx_set_mac_addr_from_csr(sc, nvm); 3631 if (!iwx_is_valid_mac_addr(nvm->hw_addr)) { 3632 printf("%s: no valid mac address was found\n", DEVNAME(sc)); 3633 err = EINVAL; 3634 goto out; 3635 } 3636 3637 rsp = (void *)hcmd.resp_pkt->data; 3638 3639 /* Initialize general data */ 3640 nvm->nvm_version = le16toh(rsp->general.nvm_version); 3641 nvm->n_hw_addrs = rsp->general.n_hw_addrs; 3642 3643 /* Initialize MAC sku data */ 3644 mac_flags = le32toh(rsp->mac_sku.mac_sku_flags); 3645 nvm->sku_cap_11ac_enable = 3646 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); 3647 nvm->sku_cap_11n_enable = 3648 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED); 3649 nvm->sku_cap_11ax_enable = 3650 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); 3651 nvm->sku_cap_band_24GHz_enable = 3652 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); 3653 nvm->sku_cap_band_52GHz_enable = 3654 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); 3655 nvm->sku_cap_mimo_disable = 3656 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED); 3657 3658 /* Initialize PHY sku data */ 3659 nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains); 3660 nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains); 3661 3662 if (le32toh(rsp->regulatory.lar_enabled) && 3663 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) { 3664 nvm->lar_enabled = 1; 3665 } 3666 3667 memcpy(&sc->sc_rsp_info, rsp, resp_len); 3668 if (v4) { 3669 sc->sc_rsp_vers = IWX_FBSD_RSP_V4; 3670 } else { 3671 sc->sc_rsp_vers = IWX_FBSD_RSP_V3; 3672 } 3673 out: 3674 iwx_free_resp(sc, &hcmd); 3675 return err; 3676 } 3677 3678 static int 3679 iwx_load_firmware(struct iwx_softc *sc) 3680 { 3681 struct iwx_fw_sects *fws; 3682 int err; 3683 3684 IWX_ASSERT_LOCKED(sc) 3685 3686 sc->sc_uc.uc_intr = 0; 3687 sc->sc_uc.uc_ok = 0; 3688 3689 fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR]; 3690 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 3691 err = iwx_ctxt_info_gen3_init(sc, fws); 3692 else 3693 err = iwx_ctxt_info_init(sc, fws); 3694 if (err) { 3695 printf("%s: could not init context info\n", DEVNAME(sc)); 3696 return err; 3697 } 3698 3699 /* wait for the firmware to load */ 3700 err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz); 3701 if (err || !sc->sc_uc.uc_ok) { 3702 printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err); 3703 iwx_ctxt_info_free_paging(sc); 3704 } 3705 3706 iwx_dma_contig_free(&sc->iml_dma); 3707 iwx_ctxt_info_free_fw_img(sc); 3708 3709 if (!sc->sc_uc.uc_ok) 3710 return EINVAL; 3711 3712 return err; 3713 } 3714 3715 static int 3716 iwx_start_fw(struct iwx_softc *sc) 3717 { 3718 int err; 3719 3720 IWX_WRITE(sc, IWX_CSR_INT, ~0); 3721 3722 iwx_disable_interrupts(sc); 3723 3724 /* make sure rfkill handshake bits are cleared */ 3725 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL); 3726 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, 3727 IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3728 3729 /* clear (again), then enable firmware load 
interrupt */ 3730 IWX_WRITE(sc, IWX_CSR_INT, ~0); 3731 3732 err = iwx_nic_init(sc); 3733 if (err) { 3734 printf("%s: unable to init nic\n", DEVNAME(sc)); 3735 return err; 3736 } 3737 3738 iwx_enable_fwload_interrupt(sc); 3739 3740 return iwx_load_firmware(sc); 3741 } 3742 3743 static int 3744 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data, 3745 size_t len) 3746 { 3747 const struct iwx_ucode_tlv *tlv; 3748 uint32_t sha1 = 0; 3749 uint16_t mac_type = 0, rf_id = 0; 3750 uint8_t *pnvm_data = NULL, *tmp; 3751 int hw_match = 0; 3752 uint32_t size = 0; 3753 int err; 3754 3755 while (len >= sizeof(*tlv)) { 3756 uint32_t tlv_len, tlv_type; 3757 3758 len -= sizeof(*tlv); 3759 tlv = (const void *)data; 3760 3761 tlv_len = le32toh(tlv->length); 3762 tlv_type = le32toh(tlv->type); 3763 3764 if (len < tlv_len) { 3765 printf("%s: invalid TLV len: %zd/%u\n", 3766 DEVNAME(sc), len, tlv_len); 3767 err = EINVAL; 3768 goto out; 3769 } 3770 3771 data += sizeof(*tlv); 3772 3773 switch (tlv_type) { 3774 case IWX_UCODE_TLV_PNVM_VERSION: 3775 if (tlv_len < sizeof(uint32_t)) 3776 break; 3777 3778 sha1 = le32_to_cpup((const uint32_t *)data); 3779 break; 3780 case IWX_UCODE_TLV_HW_TYPE: 3781 if (tlv_len < 2 * sizeof(uint16_t)) 3782 break; 3783 3784 if (hw_match) 3785 break; 3786 3787 mac_type = le16_to_cpup((const uint16_t *)data); 3788 rf_id = le16_to_cpup((const uint16_t *)(data + 3789 sizeof(uint16_t))); 3790 3791 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) && 3792 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id)) 3793 hw_match = 1; 3794 break; 3795 case IWX_UCODE_TLV_SEC_RT: { 3796 const struct iwx_pnvm_section *section; 3797 uint32_t data_len; 3798 3799 section = (const void *)data; 3800 data_len = tlv_len - sizeof(*section); 3801 3802 /* TODO: remove, this is a deprecated separator */ 3803 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee) 3804 break; 3805 3806 tmp = malloc(size + data_len, M_DEVBUF, 3807 M_WAITOK | M_ZERO); 3808 if (tmp == NULL) { 3809 err = ENOMEM; 3810 goto out; 3811 } 3812 // XXX:misha pnvm_data is NULL and size is 0 at first pass 3813 memcpy(tmp, pnvm_data, size); 3814 memcpy(tmp + size, section->data, data_len); 3815 free(pnvm_data, M_DEVBUF); 3816 pnvm_data = tmp; 3817 size += data_len; 3818 break; 3819 } 3820 case IWX_UCODE_TLV_PNVM_SKU: 3821 /* New PNVM section started, stop parsing. 
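			 * A PNVM image is a flat TLV stream; one section
			 * looks roughly like this (illustrative layout):
			 *
			 *   IWX_UCODE_TLV_PNVM_SKU      SKU id triple
			 *   IWX_UCODE_TLV_PNVM_VERSION  version (sha1 word)
			 *   IWX_UCODE_TLV_HW_TYPE       mac_type/rf_id pair
			 *   IWX_UCODE_TLV_SEC_RT        data, concatenated
			 *   IWX_UCODE_TLV_PNVM_SKU      next section
			 *
			 * Hitting the next SKU TLV means our section ended.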
*/ 3822 goto done; 3823 default: 3824 break; 3825 } 3826 3827 if (roundup(tlv_len, 4) > len) 3828 break; 3829 len -= roundup(tlv_len, 4); 3830 data += roundup(tlv_len, 4); 3831 } 3832 done: 3833 if (!hw_match || size == 0) { 3834 err = ENOENT; 3835 goto out; 3836 } 3837 3838 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1); 3839 if (err) { 3840 printf("%s: could not allocate DMA memory for PNVM\n", 3841 DEVNAME(sc)); 3842 err = ENOMEM; 3843 goto out; 3844 } 3845 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size); 3846 iwx_ctxt_info_gen3_set_pnvm(sc); 3847 sc->sc_pnvm_ver = sha1; 3848 out: 3849 free(pnvm_data, M_DEVBUF); 3850 return err; 3851 } 3852 3853 static int 3854 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len) 3855 { 3856 const struct iwx_ucode_tlv *tlv; 3857 3858 while (len >= sizeof(*tlv)) { 3859 uint32_t tlv_len, tlv_type; 3860 3861 len -= sizeof(*tlv); 3862 tlv = (const void *)data; 3863 3864 tlv_len = le32toh(tlv->length); 3865 tlv_type = le32toh(tlv->type); 3866 3867 if (len < tlv_len || roundup(tlv_len, 4) > len) 3868 return EINVAL; 3869 3870 if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) { 3871 const struct iwx_sku_id *sku_id = 3872 (const void *)(data + sizeof(*tlv)); 3873 3874 data += sizeof(*tlv) + roundup(tlv_len, 4); 3875 len -= roundup(tlv_len, 4); 3876 3877 if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) && 3878 sc->sc_sku_id[1] == le32toh(sku_id->data[1]) && 3879 sc->sc_sku_id[2] == le32toh(sku_id->data[2]) && 3880 iwx_pnvm_handle_section(sc, data, len) == 0) 3881 return 0; 3882 } else { 3883 data += sizeof(*tlv) + roundup(tlv_len, 4); 3884 len -= roundup(tlv_len, 4); 3885 } 3886 } 3887 3888 return ENOENT; 3889 } 3890 3891 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */ 3892 static void 3893 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc) 3894 { 3895 struct iwx_prph_scratch *prph_scratch; 3896 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl; 3897 3898 prph_scratch = sc->prph_scratch_dma.vaddr; 3899 prph_sc_ctrl = &prph_scratch->ctrl_cfg; 3900 3901 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr); 3902 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size); 3903 3904 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE); 3905 } 3906 3907 /* 3908 * Load platform-NVM (non-volatile-memory) data from the filesystem. 3909 * This data apparently contains regulatory information and affects device 3910 * channel configuration. 3911 * The SKU of AX210 devices tells us which PNVM file section is needed. 3912 * Pre-AX210 devices store NVM data onboard. 
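 * The PNVM image is fetched with firmware_get(9); on AX210 class
 * devices sc_pnvm_name is something like "iwlwifi-ty-a0-gf-a0.pnvm"
 * (illustrative name, the actual one depends on the MAC/RF combination).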
3913 */ 3914 static int 3915 iwx_load_pnvm(struct iwx_softc *sc) 3916 { 3917 const int wait_flags = IWX_PNVM_COMPLETE; 3918 int err = 0; 3919 const struct firmware *pnvm; 3920 3921 if (sc->sc_sku_id[0] == 0 && 3922 sc->sc_sku_id[1] == 0 && 3923 sc->sc_sku_id[2] == 0) 3924 return 0; 3925 3926 if (sc->sc_pnvm_name) { 3927 if (sc->pnvm_dma.vaddr == NULL) { 3928 IWX_UNLOCK(sc); 3929 pnvm = firmware_get(sc->sc_pnvm_name); 3930 if (pnvm == NULL) { 3931 printf("%s: could not read %s (error %d)\n", 3932 DEVNAME(sc), sc->sc_pnvm_name, err); 3933 IWX_LOCK(sc); 3934 return EINVAL; 3935 } 3936 sc->sc_pnvm = pnvm; 3937 3938 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize); 3939 IWX_LOCK(sc); 3940 if (err && err != ENOENT) { 3941 return EINVAL; 3942 } 3943 } else 3944 iwx_ctxt_info_gen3_set_pnvm(sc); 3945 } 3946 3947 if (!iwx_nic_lock(sc)) { 3948 return EBUSY; 3949 } 3950 3951 /* 3952 * If we don't have a platform NVM file simply ask firmware 3953 * to proceed without it. 3954 */ 3955 3956 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6, 3957 IWX_UREG_DOORBELL_TO_ISR6_PNVM); 3958 3959 /* Wait for the pnvm complete notification from firmware. */ 3960 while ((sc->sc_init_complete & wait_flags) != wait_flags) { 3961 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz); 3962 if (err) 3963 break; 3964 } 3965 3966 iwx_nic_unlock(sc); 3967 3968 return err; 3969 } 3970 3971 static int 3972 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant) 3973 { 3974 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = { 3975 .valid = htole32(valid_tx_ant), 3976 }; 3977 3978 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD, 3979 0, sizeof(tx_ant_cmd), &tx_ant_cmd); 3980 } 3981 3982 static int 3983 iwx_send_phy_cfg_cmd(struct iwx_softc *sc) 3984 { 3985 struct iwx_phy_cfg_cmd phy_cfg_cmd; 3986 3987 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config); 3988 phy_cfg_cmd.calib_control.event_trigger = 3989 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger; 3990 phy_cfg_cmd.calib_control.flow_trigger = 3991 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger; 3992 3993 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0, 3994 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 3995 } 3996 3997 static int 3998 iwx_send_dqa_cmd(struct iwx_softc *sc) 3999 { 4000 struct iwx_dqa_enable_cmd dqa_cmd = { 4001 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE), 4002 }; 4003 uint32_t cmd_id; 4004 4005 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0); 4006 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); 4007 } 4008 4009 static int 4010 iwx_load_ucode_wait_alive(struct iwx_softc *sc) 4011 { 4012 int err; 4013 4014 IWX_UNLOCK(sc); 4015 err = iwx_read_firmware(sc); 4016 IWX_LOCK(sc); 4017 if (err) 4018 return err; 4019 4020 err = iwx_start_fw(sc); 4021 if (err) 4022 return err; 4023 4024 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4025 err = iwx_load_pnvm(sc); 4026 if (err) 4027 return err; 4028 } 4029 4030 iwx_post_alive(sc); 4031 4032 return 0; 4033 } 4034 4035 static int 4036 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm) 4037 { 4038 const int wait_flags = IWX_INIT_COMPLETE; 4039 struct iwx_nvm_access_complete_cmd nvm_complete = {}; 4040 struct iwx_init_extended_cfg_cmd init_cfg = { 4041 .init_flags = htole32(IWX_INIT_NVM), 4042 }; 4043 4044 int err; 4045 4046 if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) { 4047 printf("%s: radio is disabled by hardware switch\n", 4048 DEVNAME(sc)); 4049 return EPERM; 4050 } 4051 4052 sc->sc_init_complete = 0; 4053 err = 
iwx_load_ucode_wait_alive(sc); 4054 if (err) { 4055 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 4056 "%s: failed to load init firmware\n", DEVNAME(sc)); 4057 return err; 4058 } else { 4059 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV, 4060 "%s: successfully loaded init firmware\n", __func__); 4061 } 4062 4063 /* 4064 * Send init config command to mark that we are sending NVM 4065 * access commands 4066 */ 4067 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP, 4068 IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg); 4069 if (err) { 4070 printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__, 4071 err); 4072 return err; 4073 } 4074 4075 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP, 4076 IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete); 4077 if (err) { 4078 return err; 4079 } 4080 4081 /* Wait for the init complete notification from the firmware. */ 4082 while ((sc->sc_init_complete & wait_flags) != wait_flags) { 4083 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz); 4084 if (err) { 4085 DPRINTF(("%s: will return err=%d\n", __func__, err)); 4086 return err; 4087 } else { 4088 DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n", 4089 __func__)); 4090 } 4091 } 4092 4093 if (readnvm) { 4094 err = iwx_nvm_get(sc); 4095 DPRINTF(("%s: err=%d\n", __func__, err)); 4096 if (err) { 4097 printf("%s: failed to read nvm (error %d)\n", 4098 DEVNAME(sc), err); 4099 return err; 4100 } else { 4101 DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc))); 4102 } 4103 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr); 4104 } 4105 return 0; 4106 } 4107 4108 static int 4109 iwx_config_ltr(struct iwx_softc *sc) 4110 { 4111 struct iwx_ltr_config_cmd cmd = { 4112 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE), 4113 }; 4114 4115 if (!sc->sc_ltr_enabled) 4116 return 0; 4117 4118 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd); 4119 } 4120 4121 static void 4122 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx, 4123 bus_dma_segment_t *seg) 4124 { 4125 struct iwx_rx_data *data = &ring->data[idx]; 4126 4127 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4128 struct iwx_rx_transfer_desc *desc = ring->desc; 4129 desc[idx].rbid = htole16(idx & 0xffff); 4130 desc[idx].addr = htole64((*seg).ds_addr); 4131 bus_dmamap_sync(ring->data_dmat, data->map, 4132 BUS_DMASYNC_PREWRITE); 4133 } else { 4134 ((uint64_t *)ring->desc)[idx] = 4135 htole64((*seg).ds_addr); 4136 bus_dmamap_sync(ring->data_dmat, data->map, 4137 BUS_DMASYNC_PREWRITE); 4138 } 4139 } 4140 4141 static int 4142 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx) 4143 { 4144 struct iwx_rx_ring *ring = &sc->rxq; 4145 struct iwx_rx_data *data = &ring->data[idx]; 4146 struct mbuf *m; 4147 int err; 4148 int fatal = 0; 4149 bus_dma_segment_t seg; 4150 int nsegs; 4151 4152 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE); 4153 if (m == NULL) 4154 return ENOBUFS; 4155 4156 if (data->m != NULL) { 4157 bus_dmamap_unload(ring->data_dmat, data->map); 4158 fatal = 1; 4159 } 4160 4161 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 4162 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg, 4163 &nsegs, BUS_DMA_NOWAIT); 4164 if (err) { 4165 /* XXX */ 4166 if (fatal) 4167 panic("could not load RX mbuf"); 4168 m_freem(m); 4169 return err; 4170 } 4171 data->m = m; 4172 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 4173 4174 /* Update RX descriptor. 
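	 * On AX210+ the descriptor is a transfer descriptor carrying the
	 * receive buffer id and the DMA address; older devices take a bare
	 * little-endian 64-bit address.  Roughly, per iwx_update_rx_desc()
	 * above:
	 *
	 *	desc[idx].rbid = htole16(idx);                    (AX210+)
	 *	desc[idx].addr = htole64(seg->ds_addr);
	 *	((uint64_t *)desc)[idx] = htole64(seg->ds_addr);  (older)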
*/ 4175 iwx_update_rx_desc(sc, ring, idx, &seg); 4176 return 0; 4177 } 4178 4179 static int 4180 iwx_rxmq_get_signal_strength(struct iwx_softc *sc, 4181 struct iwx_rx_mpdu_desc *desc) 4182 { 4183 int energy_a, energy_b; 4184 4185 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4186 energy_a = desc->v3.energy_a; 4187 energy_b = desc->v3.energy_b; 4188 } else { 4189 energy_a = desc->v1.energy_a; 4190 energy_b = desc->v1.energy_b; 4191 } 4192 energy_a = energy_a ? -energy_a : -256; 4193 energy_b = energy_b ? -energy_b : -256; 4194 return MAX(energy_a, energy_b); 4195 } 4196 4197 static int 4198 iwx_rxmq_get_chains(struct iwx_softc *sc, 4199 struct iwx_rx_mpdu_desc *desc) 4200 { 4201 4202 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 4203 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >> 4204 IWX_RATE_MCS_ANT_POS); 4205 else 4206 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >> 4207 IWX_RATE_MCS_ANT_POS); 4208 } 4209 4210 static void 4211 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt, 4212 struct iwx_rx_data *data) 4213 { 4214 struct iwx_rx_phy_info *phy_info = (void *)pkt->data; 4215 struct iwx_cmd_header *cmd_hdr = &pkt->hdr; 4216 int qid = cmd_hdr->qid; 4217 struct iwx_tx_ring *ring = &sc->txq[qid]; 4218 4219 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 4220 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 4221 } 4222 4223 /* 4224 * Retrieve the average noise (in dBm) among receivers. 4225 */ 4226 static int 4227 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats) 4228 { 4229 int i, total, nbant, noise; 4230 4231 total = nbant = noise = 0; 4232 for (i = 0; i < 3; i++) { 4233 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 4234 if (noise) { 4235 total += noise; 4236 nbant++; 4237 } 4238 } 4239 4240 /* There should be at least one antenna but check anyway. */ 4241 return (nbant == 0) ? -127 : (total / nbant) - 107; 4242 } 4243 4244 #if 0 4245 int 4246 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 4247 struct ieee80211_rxinfo *rxi) 4248 { 4249 struct ieee80211com *ic = &sc->sc_ic; 4250 struct ieee80211_key *k; 4251 struct ieee80211_frame *wh; 4252 uint64_t pn, *prsc; 4253 uint8_t *ivp; 4254 uint8_t tid; 4255 int hdrlen, hasqos; 4256 4257 wh = mtod(m, struct ieee80211_frame *); 4258 hdrlen = ieee80211_get_hdrlen(wh); 4259 ivp = (uint8_t *)wh + hdrlen; 4260 4261 /* find key for decryption */ 4262 k = ieee80211_get_rxkey(ic, m, ni); 4263 if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP) 4264 return 1; 4265 4266 /* Check that ExtIV bit is be set. */ 4267 if (!(ivp[3] & IEEE80211_WEP_EXTIV)) 4268 return 1; 4269 4270 hasqos = ieee80211_has_qos(wh); 4271 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0; 4272 prsc = &k->k_rsc[tid]; 4273 4274 /* Extract the 48-bit PN from the CCMP header. */ 4275 pn = (uint64_t)ivp[0] | 4276 (uint64_t)ivp[1] << 8 | 4277 (uint64_t)ivp[4] << 16 | 4278 (uint64_t)ivp[5] << 24 | 4279 (uint64_t)ivp[6] << 32 | 4280 (uint64_t)ivp[7] << 40; 4281 if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) { 4282 if (pn < *prsc) { 4283 ic->ic_stats.is_ccmp_replays++; 4284 return 1; 4285 } 4286 } else if (pn <= *prsc) { 4287 ic->ic_stats.is_ccmp_replays++; 4288 return 1; 4289 } 4290 /* Last seen packet number is updated in ieee80211_inputm(). */ 4291 4292 /* 4293 * Some firmware versions strip the MIC, and some don't. It is not 4294 * clear which of the capability flags could tell us what to expect. 
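	 * (When present, the CCMP MIC is the trailing 8 octets of the
	 * frame body, so stripping it here would amount to an
	 * m_adj(m, -8).)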
4295 * For now, keep things simple and just leave the MIC in place if 4296 * it is present. 4297 * 4298 * The IV will be stripped by ieee80211_inputm(). 4299 */ 4300 return 0; 4301 } 4302 #endif 4303 4304 static int 4305 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status) 4306 { 4307 struct ieee80211_frame *wh; 4308 int ret = 0; 4309 uint8_t type, subtype; 4310 4311 wh = mtod(m, struct ieee80211_frame *); 4312 4313 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4314 if (type == IEEE80211_FC0_TYPE_CTL) { 4315 return 0; 4316 } 4317 4318 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4319 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) { 4320 return 0; 4321 } 4322 4323 4324 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 4325 IEEE80211_FC0_TYPE_CTL) 4326 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) { 4327 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) != 4328 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { 4329 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__)); 4330 ret = 1; 4331 goto out; 4332 } 4333 /* Check whether decryption was successful or not. */ 4334 if ((rx_pkt_status & 4335 (IWX_RX_MPDU_RES_STATUS_DEC_DONE | 4336 IWX_RX_MPDU_RES_STATUS_MIC_OK)) != 4337 (IWX_RX_MPDU_RES_STATUS_DEC_DONE | 4338 IWX_RX_MPDU_RES_STATUS_MIC_OK)) { 4339 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__)); 4340 ret = 1; 4341 goto out; 4342 } 4343 } 4344 out: 4345 return ret; 4346 } 4347 4348 static void 4349 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx, 4350 uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags, 4351 uint32_t device_timestamp, uint8_t rssi) 4352 { 4353 struct ieee80211com *ic = &sc->sc_ic; 4354 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4355 struct ieee80211_frame *wh; 4356 struct ieee80211_node *ni; 4357 4358 /* 4359 * We need to turn the hardware provided channel index into a channel 4360 * and then find it in our ic_channels array 4361 */ 4362 if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) { 4363 /* 4364 * OpenBSD points this at the ibss chan, which it defaults to 4365 * channel 1 and then never touches again. Skip a step. 
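	 * (chanidx arrives here as an IEEE channel number; the loop below
	 * converts it back into an index into ic_channels[].)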
4366 */ 4367 printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx); 4368 chanidx = 1; 4369 } 4370 4371 int channel = chanidx; 4372 for (int i = 0; i < ic->ic_nchans; i++) { 4373 if (ic->ic_channels[i].ic_ieee == channel) { 4374 chanidx = i; 4375 } 4376 } 4377 ic->ic_curchan = &ic->ic_channels[chanidx]; 4378 4379 wh = mtod(m, struct ieee80211_frame *); 4380 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 4381 4382 #if 0 /* XXX hw decrypt */ 4383 if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) && 4384 iwx_ccmp_decap(sc, m, ni, rxi) != 0) { 4385 m_freem(m); 4386 ieee80211_release_node(ic, ni); 4387 return; 4388 } 4389 #endif 4390 if (ieee80211_radiotap_active_vap(vap)) { 4391 struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap; 4392 uint16_t chan_flags; 4393 int have_legacy_rate = 1; 4394 uint8_t mcs, rate; 4395 4396 tap->wr_flags = 0; 4397 if (is_shortpre) 4398 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 4399 tap->wr_chan_freq = 4400 htole16(ic->ic_channels[chanidx].ic_freq); 4401 chan_flags = ic->ic_channels[chanidx].ic_flags; 4402 #if 0 4403 if (ic->ic_curmode != IEEE80211_MODE_11N && 4404 ic->ic_curmode != IEEE80211_MODE_11AC) { 4405 chan_flags &= ~IEEE80211_CHAN_HT; 4406 chan_flags &= ~IEEE80211_CHAN_40MHZ; 4407 } 4408 if (ic->ic_curmode != IEEE80211_MODE_11AC) 4409 chan_flags &= ~IEEE80211_CHAN_VHT; 4410 #else 4411 chan_flags &= ~IEEE80211_CHAN_HT; 4412 #endif 4413 tap->wr_chan_flags = htole16(chan_flags); 4414 tap->wr_dbm_antsignal = rssi; 4415 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 4416 tap->wr_tsft = device_timestamp; 4417 4418 if (sc->sc_rate_n_flags_version >= 2) { 4419 uint32_t mod_type = (rate_n_flags & 4420 IWX_RATE_MCS_MOD_TYPE_MSK); 4421 const struct ieee80211_rateset *rs = NULL; 4422 uint32_t ridx; 4423 have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK || 4424 mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK); 4425 mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK); 4426 ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK); 4427 if (mod_type == IWX_RATE_MCS_CCK_MSK) 4428 rs = &ieee80211_std_rateset_11b; 4429 else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK) 4430 rs = &ieee80211_std_rateset_11a; 4431 if (rs && ridx < rs->rs_nrates) { 4432 rate = (rs->rs_rates[ridx] & 4433 IEEE80211_RATE_VAL); 4434 } else 4435 rate = 0; 4436 } else { 4437 have_legacy_rate = ((rate_n_flags & 4438 (IWX_RATE_MCS_HT_MSK_V1 | 4439 IWX_RATE_MCS_VHT_MSK_V1)) == 0); 4440 mcs = (rate_n_flags & 4441 (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 | 4442 IWX_RATE_HT_MCS_NSS_MSK_V1)); 4443 rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1); 4444 } 4445 if (!have_legacy_rate) { 4446 tap->wr_rate = (0x80 | mcs); 4447 } else { 4448 switch (rate) { 4449 /* CCK rates. */ 4450 case 10: tap->wr_rate = 2; break; 4451 case 20: tap->wr_rate = 4; break; 4452 case 55: tap->wr_rate = 11; break; 4453 case 110: tap->wr_rate = 22; break; 4454 /* OFDM rates. */ 4455 case 0xd: tap->wr_rate = 12; break; 4456 case 0xf: tap->wr_rate = 18; break; 4457 case 0x5: tap->wr_rate = 24; break; 4458 case 0x7: tap->wr_rate = 36; break; 4459 case 0x9: tap->wr_rate = 48; break; 4460 case 0xb: tap->wr_rate = 72; break; 4461 case 0x1: tap->wr_rate = 96; break; 4462 case 0x3: tap->wr_rate = 108; break; 4463 /* Unknown rate: should not happen. 
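				 * The case values above are firmware rate
				 * codes, not Mb/s: the CCK cases are in
				 * 100 kb/s units (110 -> 11 Mb/s -> 22 in
				 * radiotap's 500 kb/s units) and the OFDM
				 * cases are PLCP codes (0xd -> 6 Mb/s -> 12).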
*/ 4464 default: tap->wr_rate = 0; 4465 } 4466 // XXX hack - this needs rebased with the new rate stuff anyway 4467 tap->wr_rate = rate; 4468 } 4469 } 4470 4471 IWX_UNLOCK(sc); 4472 if (ni == NULL) { 4473 if (ieee80211_input_mimo_all(ic, m) == -1) 4474 printf("%s:%d input_all returned -1\n", __func__, __LINE__); 4475 } else { 4476 4477 if (ieee80211_input_mimo(ni, m) == -1) 4478 printf("%s:%d input_all returned -1\n", __func__, __LINE__); 4479 ieee80211_free_node(ni); 4480 } 4481 IWX_LOCK(sc); 4482 } 4483 4484 static void 4485 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata, 4486 size_t maxlen) 4487 { 4488 struct ieee80211com *ic = &sc->sc_ic; 4489 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4490 struct ieee80211_node *ni = vap->iv_bss; 4491 struct ieee80211_key *k; 4492 struct ieee80211_rx_stats rxs; 4493 struct iwx_rx_mpdu_desc *desc; 4494 uint32_t len, hdrlen, rate_n_flags, device_timestamp; 4495 int rssi; 4496 uint8_t chanidx; 4497 uint16_t phy_info; 4498 size_t desc_size; 4499 int pad = 0; 4500 4501 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) 4502 desc_size = sizeof(*desc); 4503 else 4504 desc_size = IWX_RX_DESC_SIZE_V1; 4505 4506 if (maxlen < desc_size) { 4507 m_freem(m); 4508 return; /* drop */ 4509 } 4510 4511 desc = (struct iwx_rx_mpdu_desc *)pktdata; 4512 4513 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) || 4514 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) { 4515 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status); 4516 m_freem(m); 4517 return; /* drop */ 4518 } 4519 4520 len = le16toh(desc->mpdu_len); 4521 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4522 /* Allow control frames in monitor mode. */ 4523 if (len < sizeof(struct ieee80211_frame_cts)) { 4524 m_freem(m); 4525 return; 4526 } 4527 4528 } else if (len < sizeof(struct ieee80211_frame)) { 4529 m_freem(m); 4530 return; 4531 } 4532 if (len > maxlen - desc_size) { 4533 m_freem(m); 4534 return; 4535 } 4536 4537 // TODO: arithmetic on a pointer to void is a GNU extension 4538 m->m_data = (char *)pktdata + desc_size; 4539 m->m_pkthdr.len = m->m_len = len; 4540 4541 /* Account for padding following the frame header. */ 4542 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) { 4543 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 4544 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4545 if (type == IEEE80211_FC0_TYPE_CTL) { 4546 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { 4547 case IEEE80211_FC0_SUBTYPE_CTS: 4548 hdrlen = sizeof(struct ieee80211_frame_cts); 4549 break; 4550 case IEEE80211_FC0_SUBTYPE_ACK: 4551 hdrlen = sizeof(struct ieee80211_frame_ack); 4552 break; 4553 default: 4554 hdrlen = sizeof(struct ieee80211_frame_min); 4555 break; 4556 } 4557 } else 4558 hdrlen = ieee80211_hdrsize(wh); 4559 4560 if ((le16toh(desc->status) & 4561 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) == 4562 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { 4563 // CCMP header length 4564 hdrlen += 8; 4565 } 4566 4567 memmove(m->m_data + 2, m->m_data, hdrlen); 4568 m_adj(m, 2); 4569 4570 } 4571 4572 if ((le16toh(desc->status) & 4573 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) == 4574 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { 4575 pad = 1; 4576 } 4577 4578 // /* 4579 // * Hardware de-aggregates A-MSDUs and copies the same MAC header 4580 // * in place for each subframe. But it leaves the 'A-MSDU present' 4581 // * bit set in the frame header. We need to clear this bit ourselves. 
4582 // * (XXX This workaround is not required on AX200/AX201 devices that 4583 // * have been tested by me, but it's unclear when this problem was 4584 // * fixed in the hardware. It definitely affects the 9k generation. 4585 // * Leaving this in place for now since some 9k/AX200 hybrids seem 4586 // * to exist that we may eventually add support for.) 4587 // * 4588 // * And we must allow the same CCMP PN for subframes following the 4589 // * first subframe. Otherwise they would be discarded as replays. 4590 // */ 4591 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) { 4592 DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__)); 4593 // struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 4594 // uint8_t subframe_idx = (desc->amsdu_info & 4595 // IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK); 4596 // if (subframe_idx > 0) 4597 // rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN; 4598 // if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) && 4599 // m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) { 4600 // struct ieee80211_qosframe_addr4 *qwh4 = mtod(m, 4601 // struct ieee80211_qosframe_addr4 *); 4602 // qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU); 4603 // } else if (ieee80211_has_qos(wh) && 4604 // m->m_len >= sizeof(struct ieee80211_qosframe)) { 4605 // struct ieee80211_qosframe *qwh = mtod(m, 4606 // struct ieee80211_qosframe *); 4607 // qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU); 4608 // } 4609 } 4610 4611 /* 4612 * Verify decryption before duplicate detection. The latter uses 4613 * the TID supplied in QoS frame headers and this TID is implicitly 4614 * verified as part of the CCMP nonce. 4615 */ 4616 k = ieee80211_crypto_get_txkey(ni, m); 4617 if (k != NULL && 4618 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) && 4619 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) { 4620 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__)); 4621 m_freem(m); 4622 return; 4623 } 4624 4625 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 4626 rate_n_flags = le32toh(desc->v3.rate_n_flags); 4627 chanidx = desc->v3.channel; 4628 device_timestamp = le32toh(desc->v3.gp2_on_air_rise); 4629 } else { 4630 rate_n_flags = le32toh(desc->v1.rate_n_flags); 4631 chanidx = desc->v1.channel; 4632 device_timestamp = le32toh(desc->v1.gp2_on_air_rise); 4633 } 4634 4635 phy_info = le16toh(desc->phy_info); 4636 4637 rssi = iwx_rxmq_get_signal_strength(sc, desc); 4638 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */ 4639 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */ 4640 4641 memset(&rxs, 0, sizeof(rxs)); 4642 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 4643 rxs.r_flags |= IEEE80211_R_BAND; 4644 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 4645 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START; 4646 4647 rxs.c_ieee = chanidx; 4648 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, 4649 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ); 4650 rxs.c_band = chanidx <= 14 ? 
IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ; 4651 rxs.c_rx_tsf = device_timestamp; 4652 rxs.c_chain = iwx_rxmq_get_chains(sc, desc); 4653 if (rxs.c_chain != 0) 4654 rxs.r_flags |= IEEE80211_R_C_CHAIN; 4655 4656 /* rssi is in 1/2db units */ 4657 rxs.c_rssi = rssi * 2; 4658 rxs.c_nf = sc->sc_noise; 4659 4660 if (pad) { 4661 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED; 4662 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP; 4663 } 4664 4665 if (ieee80211_add_rx_params(m, &rxs) == 0) { 4666 printf("%s: ieee80211_add_rx_params failed\n", __func__); 4667 m_freem(m); 4668 return; 4669 } 4670 4671 4672 #if 0 4673 if (iwx_rx_reorder(sc, m, chanidx, desc, 4674 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE), 4675 rate_n_flags, device_timestamp, &rxi, ml)) 4676 return; 4677 #endif 4678 4679 if (pad) { 4680 #define TRIM 8 4681 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 4682 hdrlen = ieee80211_hdrsize(wh); 4683 memmove(m->m_data + TRIM, m->m_data, hdrlen); 4684 m_adj(m, TRIM); 4685 #undef TRIM 4686 } 4687 4688 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status), 4689 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE), 4690 rate_n_flags, device_timestamp, rssi); 4691 } 4692 4693 static void 4694 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx) 4695 { 4696 struct iwx_tfh_tfd *desc = &ring->desc[idx]; 4697 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f; 4698 int i; 4699 4700 /* First TB is never cleared - it is bidirectional DMA data. */ 4701 for (i = 1; i < num_tbs; i++) { 4702 struct iwx_tfh_tb *tb = &desc->tbs[i]; 4703 memset(tb, 0, sizeof(*tb)); 4704 } 4705 desc->num_tbs = htole16(1); 4706 4707 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4708 BUS_DMASYNC_PREWRITE); 4709 } 4710 4711 static void 4712 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring, 4713 struct iwx_tx_data *txd) 4714 { 4715 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 4716 bus_dmamap_unload(ring->data_dmat, txd->map); 4717 4718 ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0); 4719 txd->m = NULL; 4720 txd->in = NULL; 4721 } 4722 4723 static void 4724 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx) 4725 { 4726 struct iwx_tx_data *txd; 4727 4728 while (ring->tail_hw != idx) { 4729 txd = &ring->data[ring->tail]; 4730 if (txd->m != NULL) { 4731 iwx_clear_tx_desc(sc, ring, ring->tail); 4732 iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0); 4733 iwx_txd_done(sc, ring, txd); 4734 ring->queued--; 4735 if (ring->queued < 0) 4736 panic("caught negative queue count"); 4737 } 4738 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT; 4739 ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size; 4740 } 4741 } 4742 4743 static void 4744 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt, 4745 struct iwx_rx_data *data) 4746 { 4747 struct ieee80211com *ic = &sc->sc_ic; 4748 struct ifnet *ifp = IC2IFP(ic); 4749 struct iwx_cmd_header *cmd_hdr = &pkt->hdr; 4750 int qid = cmd_hdr->qid, status, txfail; 4751 struct iwx_tx_ring *ring = &sc->txq[qid]; 4752 struct iwx_tx_resp *tx_resp = (void *)pkt->data; 4753 uint32_t ssn; 4754 uint32_t len = iwx_rx_packet_len(pkt); 4755 int idx = cmd_hdr->idx; 4756 struct iwx_tx_data *txd = &ring->data[idx]; 4757 struct mbuf *m = txd->m; 4758 4759 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 4760 4761 /* Sanity checks.
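	 * The aggregation-queue response layout is a struct iwx_tx_resp
	 * followed by frame_count status entries and a trailing 32-bit
	 * scheduler SSN; the length checks below reject packets too short
	 * for that layout.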
*/ 4762 if (sizeof(*tx_resp) > len) 4763 return; 4764 if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1) 4765 return; 4766 if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) + 4767 tx_resp->frame_count * sizeof(tx_resp->status) > len) 4768 return; 4769 4770 sc->sc_tx_timer[qid] = 0; 4771 4772 if (tx_resp->frame_count > 1) /* A-MPDU */ 4773 return; 4774 4775 status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK; 4776 txfail = (status != IWX_TX_STATUS_SUCCESS && 4777 status != IWX_TX_STATUS_DIRECT_DONE); 4778 4779 #ifdef __not_yet__ 4780 /* TODO: Replace accounting below with ieee80211_tx_complete() */ 4781 ieee80211_tx_complete(&in->in_ni, m, txfail); 4782 #else 4783 if (txfail) 4784 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 4785 else { 4786 if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); 4787 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 4788 if (m->m_flags & M_MCAST) 4789 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 4790 } 4791 #endif 4792 /* 4793 * On hardware supported by iwx(4) the SSN counter corresponds 4794 * to a Tx ring index rather than a sequence number. 4795 * Frames up to this index (non-inclusive) can now be freed. 4796 */ 4797 memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn)); 4798 ssn = le32toh(ssn); 4799 if (ssn < sc->max_tfd_queue_size) { 4800 iwx_txq_advance(sc, ring, ssn); 4801 iwx_clear_oactive(sc, ring); 4802 } 4803 } 4804 4805 static void 4806 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring) 4807 { 4808 IWX_ASSERT_LOCKED(sc); 4809 4810 if (ring->queued < iwx_lomark) { 4811 sc->qfullmsk &= ~(1 << ring->qid); 4812 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) { 4813 /* 4814 * Well, we're in interrupt context, but then again 4815 * I guess net80211 does all sorts of stunts in 4816 * interrupt context, so maybe this is no biggie. 
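		 * (qfullmsk carries one bit per Tx ring that is over the
		 * high watermark; once this ring drains below iwx_lomark
		 * its bit is cleared and, when no ring is left full,
		 * iwx_start() below restarts output.)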
4817 */ 4818 iwx_start(sc); 4819 } 4820 } 4821 } 4822 4823 static void 4824 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt) 4825 { 4826 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data; 4827 struct ieee80211com *ic = &sc->sc_ic; 4828 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4829 struct iwx_node *in = IWX_NODE(vap->iv_bss); 4830 struct ieee80211_node *ni = &in->in_ni; 4831 struct iwx_tx_ring *ring; 4832 uint16_t i, tfd_cnt, ra_tid_cnt, idx; 4833 int qid; 4834 4835 // if (ic->ic_state != IEEE80211_S_RUN) 4836 // return; 4837 4838 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res)) 4839 return; 4840 4841 if (ba_res->sta_id != IWX_STATION_ID) 4842 return; 4843 4844 in = (void *)ni; 4845 4846 tfd_cnt = le16toh(ba_res->tfd_cnt); 4847 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt); 4848 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) + 4849 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt + 4850 sizeof(ba_res->tfd[0]) * tfd_cnt)) 4851 return; 4852 4853 for (i = 0; i < tfd_cnt; i++) { 4854 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; 4855 uint8_t tid; 4856 4857 tid = ba_tfd->tid; 4858 if (tid >= nitems(sc->aggqid)) 4859 continue; 4860 4861 qid = sc->aggqid[tid]; 4862 if (qid != htole16(ba_tfd->q_num)) 4863 continue; 4864 4865 ring = &sc->txq[qid]; 4866 4867 #if 0 4868 ba = &ni->ni_tx_ba[tid]; 4869 if (ba->ba_state != IEEE80211_BA_AGREED) 4870 continue; 4871 #endif 4872 idx = le16toh(ba_tfd->tfd_index); 4873 sc->sc_tx_timer[qid] = 0; 4874 iwx_txq_advance(sc, ring, idx); 4875 iwx_clear_oactive(sc, ring); 4876 } 4877 } 4878 4879 static void 4880 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt, 4881 struct iwx_rx_data *data) 4882 { 4883 struct ieee80211com *ic = &sc->sc_ic; 4884 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4885 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data; 4886 uint32_t missed; 4887 4888 if ((ic->ic_opmode != IEEE80211_M_STA) || 4889 (vap->iv_state != IEEE80211_S_RUN)) 4890 return; 4891 4892 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 4893 BUS_DMASYNC_POSTREAD); 4894 4895 IWX_DPRINTF(sc, IWX_DEBUG_BEACON, 4896 "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n", 4897 __func__, 4898 le32toh(mbn->mac_id), 4899 le32toh(mbn->consec_missed_beacons_since_last_rx), 4900 le32toh(mbn->consec_missed_beacons), 4901 le32toh(mbn->num_expected_beacons), 4902 le32toh(mbn->num_recvd_beacons)); 4903 4904 missed = le32toh(mbn->consec_missed_beacons_since_last_rx); 4905 if (missed > vap->iv_bmissthreshold) { 4906 ieee80211_beacon_miss(ic); 4907 } 4908 } 4909 4910 static int 4911 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action) 4912 { 4913 struct iwx_binding_cmd cmd; 4914 struct ieee80211com *ic = &sc->sc_ic; 4915 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4916 struct iwx_vap *ivp = IWX_VAP(vap); 4917 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt; 4918 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color); 4919 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE); 4920 uint32_t status; 4921 4922 if (action == IWX_FW_CTXT_ACTION_ADD && active) 4923 panic("binding already added"); 4924 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active) 4925 panic("binding already removed"); 4926 4927 if (phyctxt == NULL) /* XXX race with iwx_stop() */ 4928 return EINVAL; 4929 4930 memset(&cmd, 0, sizeof(cmd)); 4931 4932 cmd.id_and_color 4933 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); 4934 cmd.action = htole32(action); 4935 cmd.phy = 
htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); 4936 4937 cmd.macs[0] = htole32(mac_id); 4938 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++) 4939 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID); 4940 4941 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) || 4942 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)) 4943 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX); 4944 else 4945 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX); 4946 4947 status = 0; 4948 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd), 4949 &cmd, &status); 4950 if (err == 0 && status != 0) 4951 err = EIO; 4952 4953 return err; 4954 } 4955 4956 static uint8_t 4957 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan) 4958 { 4959 int ctlchan = ieee80211_chan2ieee(ic, chan); 4960 int midpoint = chan->ic_vht_ch_freq1; 4961 4962 /* 4963 * The FW is expected to check the control channel position only 4964 * when in HT/VHT and the channel width is not 20MHz. Return 4965 * this value as the default one: 4966 */ 4967 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 4968 4969 switch (ctlchan - midpoint) { 4970 case -6: 4971 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW; 4972 break; 4973 case -2: 4974 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 4975 break; 4976 case 2: 4977 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE; 4978 break; 4979 case 6: 4980 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE; 4981 break; 4982 default: 4983 break; 4984 } 4985 4986 return pos; 4987 } 4988 4989 static int 4990 iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt, 4991 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco, 4992 uint8_t vht_chan_width, int cmdver) 4993 { 4994 struct ieee80211com *ic = &sc->sc_ic; 4995 struct iwx_phy_context_cmd_uhb cmd; 4996 uint8_t active_cnt, idle_cnt; 4997 struct ieee80211_channel *chan = ctxt->channel; 4998 4999 memset(&cmd, 0, sizeof(cmd)); 5000 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id, 5001 ctxt->color)); 5002 cmd.action = htole32(action); 5003 5004 if (IEEE80211_IS_CHAN_2GHZ(chan) || 5005 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)) 5006 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX); 5007 else 5008 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX); 5009 5010 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ? 
5011 IWX_PHY_BAND_24 : IWX_PHY_BAND_5; 5012 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan)); 5013 5014 if (IEEE80211_IS_CHAN_VHT80(chan)) { 5015 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan); 5016 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80; 5017 } else if (IEEE80211_IS_CHAN_HT40(chan)) { 5018 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40; 5019 if (IEEE80211_IS_CHAN_HT40D(chan)) 5020 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE; 5021 else 5022 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5023 } else { 5024 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20; 5025 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5026 } 5027 5028 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 5029 IWX_RLC_CONFIG_CMD) != 2) { 5030 idle_cnt = chains_static; 5031 active_cnt = chains_dynamic; 5032 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) << 5033 IWX_PHY_RX_CHAIN_VALID_POS); 5034 cmd.rxchain_info |= htole32(idle_cnt << 5035 IWX_PHY_RX_CHAIN_CNT_POS); 5036 cmd.rxchain_info |= htole32(active_cnt << 5037 IWX_PHY_RX_CHAIN_MIMO_CNT_POS); 5038 } 5039 5040 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); 5041 } 5042 5043 #if 0 5044 int 5045 iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt, 5046 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco, 5047 uint8_t vht_chan_width, int cmdver) 5048 { 5049 struct ieee80211com *ic = &sc->sc_ic; 5050 struct iwx_phy_context_cmd cmd; 5051 uint8_t active_cnt, idle_cnt; 5052 struct ieee80211_channel *chan = ctxt->channel; 5053 5054 memset(&cmd, 0, sizeof(cmd)); 5055 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id, 5056 ctxt->color)); 5057 cmd.action = htole32(action); 5058 5059 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) || 5060 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)) 5061 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX); 5062 else 5063 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX); 5064 5065 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ? 
5066 IWX_PHY_BAND_24 : IWX_PHY_BAND_5; 5067 cmd.ci.channel = ieee80211_chan2ieee(ic, chan); 5068 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) { 5069 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan); 5070 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80; 5071 } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) { 5072 if (sco == IEEE80211_HTOP0_SCO_SCA) { 5073 /* secondary chan above -> control chan below */ 5074 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5075 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40; 5076 } else if (sco == IEEE80211_HTOP0_SCO_SCB) { 5077 /* secondary chan below -> control chan above */ 5078 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE; 5079 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40; 5080 } else { 5081 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20; 5082 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5083 } 5084 } else { 5085 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20; 5086 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW; 5087 } 5088 5089 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 5090 IWX_RLC_CONFIG_CMD) != 2) { 5091 idle_cnt = chains_static; 5092 active_cnt = chains_dynamic; 5093 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) << 5094 IWX_PHY_RX_CHAIN_VALID_POS); 5095 cmd.rxchain_info |= htole32(idle_cnt << 5096 IWX_PHY_RX_CHAIN_CNT_POS); 5097 cmd.rxchain_info |= htole32(active_cnt << 5098 IWX_PHY_RX_CHAIN_MIMO_CNT_POS); 5099 } 5100 5101 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); 5102 } 5103 #endif 5104 5105 static int 5106 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt, 5107 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, 5108 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width) 5109 { 5110 int cmdver; 5111 5112 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD); 5113 if (cmdver != 3 && cmdver != 4) { 5114 printf("%s: firmware does not support phy-context-cmd v3/v4\n", 5115 DEVNAME(sc)); 5116 return ENOTSUP; 5117 } 5118 5119 /* 5120 * Intel increased the size of the fw_channel_info struct and neglected 5121 * to bump the phy_context_cmd struct, which contains an fw_channel_info 5122 * member in the middle. 5123 * To keep things simple we use a separate function to handle the larger 5124 * variant of the phy context command. 5125 */ 5126 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) { 5127 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static, 5128 chains_dynamic, action, sco, vht_chan_width, cmdver); 5129 } else 5130 panic("Unsupported old hardware contact thj@"); 5131 5132 #if 0 5133 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic, 5134 action, sco, vht_chan_width, cmdver); 5135 #endif 5136 } 5137 5138 static int 5139 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd) 5140 { 5141 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE]; 5142 struct iwx_tfh_tfd *desc; 5143 struct iwx_tx_data *txdata; 5144 struct iwx_device_cmd *cmd; 5145 struct mbuf *m; 5146 bus_addr_t paddr; 5147 uint64_t addr; 5148 int err = 0, i, paylen, off/*, s*/; 5149 int idx, code, async, group_id; 5150 size_t hdrlen, datasz; 5151 uint8_t *data; 5152 int generation = sc->sc_generation; 5153 bus_dma_segment_t seg[10]; 5154 int nsegs; 5155 5156 code = hcmd->id; 5157 async = hcmd->flags & IWX_CMD_ASYNC; 5158 idx = ring->cur; 5159 5160 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) { 5161 paylen += hcmd->len[i]; 5162 } 5163 5164 /* If this command waits for a response, allocate response buffer. 
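	 * The buffer must hold at least a struct iwx_rx_packet and at most
	 * IWX_CMD_RESP_MAX bytes; the KASSERTs below enforce this.  The Rx
	 * path fills sc_cmd_resp_pkt[idx] when the response arrives and
	 * iwx_cmd_done() wakes the msleep(9) further down.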
*/ 5165 hcmd->resp_pkt = NULL; 5166 if (hcmd->flags & IWX_CMD_WANT_RESP) { 5167 uint8_t *resp_buf; 5168 KASSERT(!async, ("async command want response")); 5169 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet), 5170 ("wrong pkt len 1")); 5171 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX, 5172 ("wrong pkt len 2")); 5173 if (sc->sc_cmd_resp_pkt[idx] != NULL) 5174 return ENOSPC; 5175 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF, 5176 M_NOWAIT | M_ZERO); 5177 if (resp_buf == NULL) 5178 return ENOMEM; 5179 sc->sc_cmd_resp_pkt[idx] = resp_buf; 5180 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len; 5181 } else { 5182 sc->sc_cmd_resp_pkt[idx] = NULL; 5183 } 5184 5185 desc = &ring->desc[idx]; 5186 txdata = &ring->data[idx]; 5187 5188 /* 5189 * XXX Intel inside (tm) 5190 * Firmware API versions >= 50 reject old-style commands in 5191 * group 0 with a "BAD_COMMAND" firmware error. We must pretend 5192 * that such commands were in the LONG_GROUP instead in order 5193 * for firmware to accept them. 5194 */ 5195 if (iwx_cmd_groupid(code) == 0) { 5196 code = IWX_WIDE_ID(IWX_LONG_GROUP, code); 5197 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW; 5198 } else 5199 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW; 5200 5201 group_id = iwx_cmd_groupid(code); 5202 5203 hdrlen = sizeof(cmd->hdr_wide); 5204 datasz = sizeof(cmd->data_wide); 5205 5206 if (paylen > datasz) { 5207 /* Command is too large to fit in pre-allocated space. */ 5208 size_t totlen = hdrlen + paylen; 5209 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) { 5210 printf("%s: firmware command too long (%zd bytes)\n", 5211 DEVNAME(sc), totlen); 5212 err = EINVAL; 5213 goto out; 5214 } 5215 if (totlen > IWX_RBUF_SIZE) 5216 panic("totlen > IWX_RBUF_SIZE"); 5217 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE); 5218 if (m == NULL) { 5219 printf("%s: could not get fw cmd mbuf (%i bytes)\n", 5220 DEVNAME(sc), IWX_RBUF_SIZE); 5221 err = ENOMEM; 5222 goto out; 5223 } 5224 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 5225 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m, 5226 seg, &nsegs, BUS_DMA_NOWAIT); 5227 if (nsegs > 20) 5228 panic("nsegs > 20"); 5229 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs)); 5230 if (err) { 5231 printf("%s: could not load fw cmd mbuf (%zd bytes)\n", 5232 DEVNAME(sc), totlen); 5233 m_freem(m); 5234 goto out; 5235 } 5236 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */ 5237 cmd = mtod(m, struct iwx_device_cmd *); 5238 paddr = seg[0].ds_addr; 5239 } else { 5240 cmd = &ring->cmd[idx]; 5241 paddr = txdata->cmd_paddr; 5242 } 5243 5244 memset(cmd, 0, sizeof(*cmd)); 5245 cmd->hdr_wide.opcode = iwx_cmd_opcode(code); 5246 cmd->hdr_wide.group_id = group_id; 5247 cmd->hdr_wide.qid = ring->qid; 5248 cmd->hdr_wide.idx = idx; 5249 cmd->hdr_wide.length = htole16(paylen); 5250 cmd->hdr_wide.version = iwx_cmd_version(code); 5251 data = cmd->data_wide; 5252 5253 for (i = 0, off = 0; i < nitems(hcmd->data); i++) { 5254 if (hcmd->len[i] == 0) 5255 continue; 5256 memcpy(data + off, hcmd->data[i], hcmd->len[i]); 5257 off += hcmd->len[i]; 5258 } 5259 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen)); 5260 5261 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE)); 5262 addr = htole64(paddr); 5263 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr)); 5264 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) { 5265 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen, 5266 paylen)); 5267 desc->tbs[1].tb_len = htole16(hdrlen + paylen - 5268 IWX_FIRST_TB_SIZE); 5269 addr = htole64(paddr + IWX_FIRST_TB_SIZE); 
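		/*
		 * Large commands use two TBs: the first TB carries the
		 * leading IWX_FIRST_TB_SIZE bytes (wide header included)
		 * and the second TB maps the remainder at
		 * paddr + IWX_FIRST_TB_SIZE, set up just below.
		 */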
5270 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr)); 5271 desc->num_tbs = htole16(2); 5272 } else 5273 desc->num_tbs = htole16(1); 5274 5275 if (paylen > datasz) { 5276 bus_dmamap_sync(ring->data_dmat, txdata->map, 5277 BUS_DMASYNC_PREWRITE); 5278 } else { 5279 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 5280 BUS_DMASYNC_PREWRITE); 5281 } 5282 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 5283 BUS_DMASYNC_PREWRITE); 5284 5285 /* Kick command ring. */ 5286 ring->queued++; 5287 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT; 5288 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size; 5289 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw)); 5290 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw); 5291 5292 if (!async) { 5293 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz); 5294 if (err == 0) { 5295 /* if hardware is no longer up, return error */ 5296 if (generation != sc->sc_generation) { 5297 err = ENXIO; 5298 goto out; 5299 } 5300 5301 /* Response buffer will be freed in iwx_free_resp(). */ 5302 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx]; 5303 sc->sc_cmd_resp_pkt[idx] = NULL; 5304 } else if (generation == sc->sc_generation) { 5305 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF); 5306 sc->sc_cmd_resp_pkt[idx] = NULL; 5307 } 5308 } 5309 out: 5310 return err; 5311 } 5312 5313 static int 5314 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags, 5315 uint16_t len, const void *data) 5316 { 5317 struct iwx_host_cmd cmd = { 5318 .id = id, 5319 .len = { len, }, 5320 .data = { data, }, 5321 .flags = flags, 5322 }; 5323 5324 return iwx_send_cmd(sc, &cmd); 5325 } 5326 5327 static int 5328 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd, 5329 uint32_t *status) 5330 { 5331 struct iwx_rx_packet *pkt; 5332 struct iwx_cmd_response *resp; 5333 int err, resp_len; 5334 5335 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP")); 5336 cmd->flags |= IWX_CMD_WANT_RESP; 5337 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp); 5338 5339 err = iwx_send_cmd(sc, cmd); 5340 if (err) 5341 return err; 5342 5343 pkt = cmd->resp_pkt; 5344 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) 5345 return EIO; 5346 5347 resp_len = iwx_rx_packet_payload_len(pkt); 5348 if (resp_len != sizeof(*resp)) { 5349 iwx_free_resp(sc, cmd); 5350 return EIO; 5351 } 5352 5353 resp = (void *)pkt->data; 5354 *status = le32toh(resp->status); 5355 iwx_free_resp(sc, cmd); 5356 return err; 5357 } 5358 5359 static int 5360 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len, 5361 const void *data, uint32_t *status) 5362 { 5363 struct iwx_host_cmd cmd = { 5364 .id = id, 5365 .len = { len, }, 5366 .data = { data, }, 5367 }; 5368 5369 return iwx_send_cmd_status(sc, &cmd, status); 5370 } 5371 5372 static void 5373 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd) 5374 { 5375 KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP, 5376 ("hcmd flags !IWX_CMD_WANT_RESP")); 5377 free(hcmd->resp_pkt, M_DEVBUF); 5378 hcmd->resp_pkt = NULL; 5379 } 5380 5381 static void 5382 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code) 5383 { 5384 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE]; 5385 struct iwx_tx_data *data; 5386 5387 if (qid != IWX_DQA_CMD_QUEUE) { 5388 return; /* Not a command ack. 
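		 * Data-queue completions are handled by iwx_rx_tx_cmd()
		 * and iwx_txq_advance() instead.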
*/ 5389 } 5390 5391 data = &ring->data[idx]; 5392 5393 if (data->m != NULL) { 5394 bus_dmamap_sync(ring->data_dmat, data->map, 5395 BUS_DMASYNC_POSTWRITE); 5396 bus_dmamap_unload(ring->data_dmat, data->map); 5397 m_freem(data->m); 5398 data->m = NULL; 5399 } 5400 wakeup(&ring->desc[idx]); 5401 5402 DPRINTF(("%s: command 0x%x done\n", __func__, code)); 5403 if (ring->queued == 0) { 5404 DPRINTF(("%s: unexpected firmware response to command 0x%x\n", 5405 DEVNAME(sc), code)); 5406 } else if (ring->queued > 0) 5407 ring->queued--; 5408 } 5409 5410 static uint32_t 5411 iwx_fw_rateidx_ofdm(uint8_t rval) 5412 { 5413 /* Firmware expects indices which match our 11a rate set. */ 5414 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a; 5415 int i; 5416 5417 for (i = 0; i < rs->rs_nrates; i++) { 5418 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval) 5419 return i; 5420 } 5421 5422 return 0; 5423 } 5424 5425 static uint32_t 5426 iwx_fw_rateidx_cck(uint8_t rval) 5427 { 5428 /* Firmware expects indices which match our 11b rate set. */ 5429 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b; 5430 int i; 5431 5432 for (i = 0; i < rs->rs_nrates; i++) { 5433 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval) 5434 return i; 5435 } 5436 5437 return 0; 5438 } 5439 5440 static int 5441 iwx_min_basic_rate(struct ieee80211com *ic) 5442 { 5443 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5444 struct ieee80211_node *ni = vap->iv_bss; 5445 struct ieee80211_rateset *rs = &ni->ni_rates; 5446 struct ieee80211_channel *c = ni->ni_chan; 5447 int i, min, rval; 5448 5449 min = -1; 5450 5451 if (c == IEEE80211_CHAN_ANYC) { 5452 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__); 5453 return -1; 5454 } 5455 5456 for (i = 0; i < rs->rs_nrates; i++) { 5457 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0) 5458 continue; 5459 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 5460 if (min == -1) 5461 min = rval; 5462 else if (rval < min) 5463 min = rval; 5464 } 5465 5466 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */ 5467 if (min == -1) 5468 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12; 5469 5470 return min; 5471 } 5472 5473 /* 5474 * Determine the Tx command flags and Tx rate+flags to use. 5475 * Return the selected Tx rate. 
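 * Returns a pointer into the iwx_rates[] table, or NULL if the node is
 * being torn down and no channel is set.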
5476 */
5477 static const struct iwx_rate *
5478 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5479     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5480     struct mbuf *m)
5481 {
5482     struct ieee80211com *ic = &sc->sc_ic;
5483     struct ieee80211_node *ni = &in->in_ni;
5484     struct ieee80211_rateset *rs = &ni->ni_rates;
5485     const struct iwx_rate *rinfo = NULL;
5486     int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5487     int ridx = iwx_min_basic_rate(ic);
5488     int min_ridx, rate_flags;
5489     uint8_t rval;
5490
5491     /* The node is being torn down and no channel is set; bail out. */
5492     if (ridx == -1)
5493         return NULL;
5494
5495     min_ridx = iwx_rval2ridx(ridx);
5496
5497     *flags = 0;
5498
5499     if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5500         type != IEEE80211_FC0_TYPE_DATA) {
5501         /* For multicast and non-data frames, use the lowest basic rate. */
5502         ridx = min_ridx;
5503         *flags |= IWX_TX_FLAGS_CMD_RATE;
5504     } else if (ni->ni_flags & IEEE80211_NODE_VHT) {
5505         /* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
5506         ridx = iwx_min_basic_rate(ic);
5507     } else if (ni->ni_flags & IEEE80211_NODE_HT) {
5508         ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5509             & ~IEEE80211_RATE_MCS];
5510     } else {
5511         rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5512             & IEEE80211_RATE_VAL);
5513         ridx = iwx_rval2ridx(rval);
5514         if (ridx < min_ridx)
5515             ridx = min_ridx;
5516     }
5517
5518     if (m->m_flags & M_EAPOL)
5519         *flags |= IWX_TX_FLAGS_HIGH_PRI;
5520
5521     rinfo = &iwx_rates[ridx];
5522
5523     /*
5524      * Do not fill rate_n_flags if firmware controls the Tx rate.
5525      * For data frames we rely on Tx rate scaling in firmware by default.
5526      */
5527     if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5528         *rate_n_flags = 0;
5529         return rinfo;
5530     }
5531
5532     /*
5533      * Forcing a CCK/OFDM legacy rate is important for management frames.
5534      * Association will only succeed if we do this correctly.
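 *
 * A rough sketch of the legacy rate_n_flags value assembled below
 * (v2 rate API): an antenna mask (IWX_RATE_MCS_ANT_A_MSK), a modulation
 * type bit (CCK vs. legacy OFDM), and a firmware rate index within
 * IWX_RATE_LEGACY_RATE_MSK looked up via iwx_fw_rateidx_cck()/_ofdm().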
5535 */
5536
5537     IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5538     IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5539     rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5540     if (IWX_RIDX_IS_CCK(ridx)) {
5541         if (sc->sc_rate_n_flags_version >= 2)
5542             rate_flags |= IWX_RATE_MCS_CCK_MSK;
5543         else
5544             rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5545     } else if (sc->sc_rate_n_flags_version >= 2)
5546         rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5547
5548     rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5549         & IEEE80211_RATE_VAL);
5550     IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5551         rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5552
5553     if (sc->sc_rate_n_flags_version >= 2) {
5554         if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5555             rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5556                 IWX_RATE_LEGACY_RATE_MSK);
5557         } else {
5558             rate_flags |= (iwx_fw_rateidx_cck(rval) &
5559                 IWX_RATE_LEGACY_RATE_MSK);
5560         }
5561     } else
5562         rate_flags |= rinfo->plcp;
5563
5564     *rate_n_flags = rate_flags;
5565     IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5566         __func__, __LINE__, *flags);
5567     IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5568         __func__, __LINE__, *rate_n_flags);
5569
5570     if (sc->sc_debug & IWX_DEBUG_TXRATE)
5571         print_ratenflags(__func__, __LINE__,
5572             *rate_n_flags, sc->sc_rate_n_flags_version);
5573
5574     return rinfo;
5575 }
5576
5577 static void
5578 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
5579     int idx, uint16_t byte_cnt, uint16_t num_tbs)
5580 {
5581     uint8_t filled_tfd_size, num_fetch_chunks;
5582     uint16_t len = byte_cnt;
5583     uint16_t bc_ent;
5584
5585     filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5586         num_tbs * sizeof(struct iwx_tfh_tb);
5587     /*
5588      * filled_tfd_size contains the number of filled bytes in the TFD.
5589      * Dividing it by 64 will give the number of chunks to fetch
5590      * to SRAM: 0 for one chunk, 1 for 2, and so on.
5591 * If, for example, TFD contains only 3 TBs then 32 bytes 5592 * of the TFD are used, and only one chunk of 64 bytes should 5593 * be fetched 5594 */ 5595 num_fetch_chunks = howmany(filled_tfd_size, 64) - 1; 5596 5597 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 5598 struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr; 5599 /* Starting from AX210, the HW expects bytes */ 5600 bc_ent = htole16(len | (num_fetch_chunks << 14)); 5601 scd_bc_tbl[idx].tfd_offset = bc_ent; 5602 } else { 5603 struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr; 5604 /* Before AX210, the HW expects DW */ 5605 len = howmany(len, 4); 5606 bc_ent = htole16(len | (num_fetch_chunks << 12)); 5607 scd_bc_tbl->tfd_offset[idx] = bc_ent; 5608 } 5609 5610 bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE); 5611 } 5612 5613 static int 5614 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 5615 { 5616 struct ieee80211com *ic = &sc->sc_ic; 5617 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5618 struct iwx_node *in = (void *)ni; 5619 struct iwx_tx_ring *ring; 5620 struct iwx_tx_data *data; 5621 struct iwx_tfh_tfd *desc; 5622 struct iwx_device_cmd *cmd; 5623 struct ieee80211_frame *wh; 5624 struct ieee80211_key *k = NULL; 5625 const struct iwx_rate *rinfo; 5626 uint64_t paddr; 5627 u_int hdrlen; 5628 uint32_t rate_n_flags; 5629 uint16_t num_tbs, flags, offload_assist = 0; 5630 uint8_t type, subtype; 5631 int i, totlen, err, pad, qid; 5632 #define IWM_MAX_SCATTER 20 5633 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 5634 int nsegs; 5635 struct mbuf *m1; 5636 size_t txcmd_size; 5637 5638 IWX_ASSERT_LOCKED(sc); 5639 5640 wh = mtod(m, struct ieee80211_frame *); 5641 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 5642 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 5643 hdrlen = ieee80211_anyhdrsize(wh); 5644 5645 qid = sc->first_data_qid; 5646 5647 /* Put QoS frames on the data queue which maps to their TID. 
*/ 5648 if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) { 5649 uint16_t qos = ieee80211_gettid(wh); 5650 uint8_t tid = qos & IEEE80211_QOS_TID; 5651 #if 0 5652 /* 5653 * XXX-THJ: TODO when we enable ba we need to manage the 5654 * mappings 5655 */ 5656 struct ieee80211_tx_ba *ba; 5657 ba = &ni->ni_tx_ba[tid]; 5658 5659 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 5660 type == IEEE80211_FC0_TYPE_DATA && 5661 subtype != IEEE80211_FC0_SUBTYPE_NODATA && 5662 subtype != IEEE80211_FC0_SUBTYPE_BAR && 5663 sc->aggqid[tid] != 0 /*&& 5664 ba->ba_state == IEEE80211_BA_AGREED*/) { 5665 qid = sc->aggqid[tid]; 5666 #else 5667 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 5668 type == IEEE80211_FC0_TYPE_DATA && 5669 subtype != IEEE80211_FC0_SUBTYPE_NODATA && 5670 sc->aggqid[tid] != 0) { 5671 qid = sc->aggqid[tid]; 5672 #endif 5673 } 5674 } 5675 5676 ring = &sc->txq[qid]; 5677 desc = &ring->desc[ring->cur]; 5678 memset(desc, 0, sizeof(*desc)); 5679 data = &ring->data[ring->cur]; 5680 5681 cmd = &ring->cmd[ring->cur]; 5682 cmd->hdr.code = IWX_TX_CMD; 5683 cmd->hdr.flags = 0; 5684 cmd->hdr.qid = ring->qid; 5685 cmd->hdr.idx = ring->cur; 5686 5687 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m); 5688 if (rinfo == NULL) 5689 return EINVAL; 5690 5691 /* Offloaded sequence number assignment; non-AMPDU case */ 5692 if ((m->m_flags & M_AMPDU_MPDU) == 0) 5693 ieee80211_output_seqno_assign(ni, -1, m); 5694 5695 /* Radiotap */ 5696 if (ieee80211_radiotap_active_vap(vap)) { 5697 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap; 5698 5699 tap->wt_flags = 0; 5700 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 5701 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 5702 tap->wt_rate = rinfo->rate; 5703 if (k != NULL) 5704 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 5705 ieee80211_radiotap_tx(vap, m); 5706 } 5707 5708 /* Encrypt - CCMP via direct HW path, TKIP/WEP indirected openbsd-style for now */ 5709 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 5710 k = ieee80211_crypto_get_txkey(ni, m); 5711 if (k == NULL) { 5712 printf("%s: k is NULL!\n", __func__); 5713 m_freem(m); 5714 return (ENOBUFS); 5715 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) { 5716 k->wk_keytsc++; 5717 } else { 5718 k->wk_cipher->ic_encap(k, m); 5719 5720 /* 802.11 headers may have moved */ 5721 wh = mtod(m, struct ieee80211_frame *); 5722 flags |= IWX_TX_FLAGS_ENCRYPT_DIS; 5723 } 5724 } else 5725 flags |= IWX_TX_FLAGS_ENCRYPT_DIS; 5726 5727 totlen = m->m_pkthdr.len; 5728 5729 if (hdrlen & 3) { 5730 /* First segment length must be a multiple of 4. */ 5731 pad = 4 - (hdrlen & 3); 5732 offload_assist |= IWX_TX_CMD_OFFLD_PAD; 5733 } else 5734 pad = 0; 5735 5736 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) { 5737 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data; 5738 memset(tx, 0, sizeof(*tx)); 5739 tx->len = htole16(totlen); 5740 tx->offload_assist = htole32(offload_assist); 5741 tx->flags = htole16(flags); 5742 tx->rate_n_flags = htole32(rate_n_flags); 5743 memcpy(tx->hdr, wh, hdrlen); 5744 txcmd_size = sizeof(*tx); 5745 } else { 5746 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data; 5747 memset(tx, 0, sizeof(*tx)); 5748 tx->len = htole16(totlen); 5749 tx->offload_assist = htole16(offload_assist); 5750 tx->flags = htole32(flags); 5751 tx->rate_n_flags = htole32(rate_n_flags); 5752 memcpy(tx->hdr, wh, hdrlen); 5753 txcmd_size = sizeof(*tx); 5754 } 5755 5756 /* Trim 802.11 header. 
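 * The header was already copied into the Tx command (tx->hdr above), so
 * only the frame payload remains in the mbuf to be mapped as data
 * segments.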
*/ 5757 m_adj(m, hdrlen); 5758 5759 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 5760 &nsegs, BUS_DMA_NOWAIT); 5761 if (err && err != EFBIG) { 5762 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err); 5763 m_freem(m); 5764 return err; 5765 } 5766 if (err) { 5767 /* Too many DMA segments, linearize mbuf. */ 5768 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2); 5769 if (m1 == NULL) { 5770 printf("%s: could not defrag mbufs\n", __func__); 5771 m_freem(m); 5772 return (ENOBUFS); 5773 } 5774 m = m1; 5775 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 5776 segs, &nsegs, BUS_DMA_NOWAIT); 5777 if (err) { 5778 printf("%s: can't map mbuf (error %d)\n", __func__, 5779 err); 5780 m_freem(m); 5781 return (err); 5782 } 5783 } 5784 data->m = m; 5785 data->in = in; 5786 5787 /* Fill TX descriptor. */ 5788 num_tbs = 2 + nsegs; 5789 desc->num_tbs = htole16(num_tbs); 5790 5791 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE); 5792 paddr = htole64(data->cmd_paddr); 5793 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr)); 5794 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32) 5795 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__)); 5796 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) + 5797 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE); 5798 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE); 5799 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr)); 5800 5801 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32) 5802 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__)); 5803 5804 /* Other DMA segments are for data payload. */ 5805 for (i = 0; i < nsegs; i++) { 5806 seg = &segs[i]; 5807 desc->tbs[i + 2].tb_len = htole16(seg->ds_len); 5808 paddr = htole64(seg->ds_addr); 5809 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr)); 5810 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32) 5811 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2)); 5812 } 5813 5814 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 5815 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 5816 BUS_DMASYNC_PREWRITE); 5817 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 5818 BUS_DMASYNC_PREWRITE); 5819 5820 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs); 5821 5822 /* Kick TX ring. */ 5823 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT; 5824 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size; 5825 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw); 5826 5827 /* Mark TX ring as full if we reach a certain threshold. 
*/ 5828 if (++ring->queued > iwx_himark) { 5829 sc->qfullmsk |= 1 << ring->qid; 5830 } 5831 5832 sc->sc_tx_timer[ring->qid] = 15; 5833 5834 return 0; 5835 } 5836 5837 static int 5838 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids) 5839 { 5840 struct iwx_rx_packet *pkt; 5841 struct iwx_tx_path_flush_cmd_rsp *resp; 5842 struct iwx_tx_path_flush_cmd flush_cmd = { 5843 .sta_id = htole32(sta_id), 5844 .tid_mask = htole16(tids), 5845 }; 5846 struct iwx_host_cmd hcmd = { 5847 .id = IWX_TXPATH_FLUSH, 5848 .len = { sizeof(flush_cmd), }, 5849 .data = { &flush_cmd, }, 5850 .flags = IWX_CMD_WANT_RESP, 5851 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp), 5852 }; 5853 int err, resp_len, i, num_flushed_queues; 5854 5855 err = iwx_send_cmd(sc, &hcmd); 5856 if (err) 5857 return err; 5858 5859 pkt = hcmd.resp_pkt; 5860 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) { 5861 err = EIO; 5862 goto out; 5863 } 5864 5865 resp_len = iwx_rx_packet_payload_len(pkt); 5866 /* Some firmware versions don't provide a response. */ 5867 if (resp_len == 0) 5868 goto out; 5869 else if (resp_len != sizeof(*resp)) { 5870 err = EIO; 5871 goto out; 5872 } 5873 5874 resp = (void *)pkt->data; 5875 5876 if (le16toh(resp->sta_id) != sta_id) { 5877 err = EIO; 5878 goto out; 5879 } 5880 5881 num_flushed_queues = le16toh(resp->num_flushed_queues); 5882 if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) { 5883 err = EIO; 5884 goto out; 5885 } 5886 5887 for (i = 0; i < num_flushed_queues; i++) { 5888 struct iwx_flush_queue_info *queue_info = &resp->queues[i]; 5889 uint16_t tid = le16toh(queue_info->tid); 5890 uint16_t read_after = le16toh(queue_info->read_after_flush); 5891 uint16_t qid = le16toh(queue_info->queue_num); 5892 struct iwx_tx_ring *txq; 5893 5894 if (qid >= nitems(sc->txq)) 5895 continue; 5896 5897 txq = &sc->txq[qid]; 5898 if (tid != txq->tid) 5899 continue; 5900 5901 iwx_txq_advance(sc, txq, read_after); 5902 } 5903 out: 5904 iwx_free_resp(sc, &hcmd); 5905 return err; 5906 } 5907 5908 #define IWX_FLUSH_WAIT_MS 2000 5909 5910 static int 5911 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain) 5912 { 5913 struct iwx_add_sta_cmd cmd; 5914 int err; 5915 uint32_t status; 5916 5917 memset(&cmd, 0, sizeof(cmd)); 5918 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 5919 in->in_color)); 5920 cmd.sta_id = IWX_STATION_ID; 5921 cmd.add_modify = IWX_STA_MODE_MODIFY; 5922 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0; 5923 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW); 5924 5925 status = IWX_ADD_STA_SUCCESS; 5926 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, 5927 sizeof(cmd), &cmd, &status); 5928 if (err) { 5929 printf("%s: could not update sta (error %d)\n", 5930 DEVNAME(sc), err); 5931 return err; 5932 } 5933 5934 switch (status & IWX_ADD_STA_STATUS_MASK) { 5935 case IWX_ADD_STA_SUCCESS: 5936 break; 5937 default: 5938 err = EIO; 5939 printf("%s: Couldn't %s draining for station\n", 5940 DEVNAME(sc), drain ? 
"enable" : "disable"); 5941 break; 5942 } 5943 5944 return err; 5945 } 5946 5947 static int 5948 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in) 5949 { 5950 int err; 5951 5952 IWX_ASSERT_LOCKED(sc); 5953 5954 sc->sc_flags |= IWX_FLAG_TXFLUSH; 5955 5956 err = iwx_drain_sta(sc, in, 1); 5957 if (err) 5958 goto done; 5959 5960 err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff); 5961 if (err) { 5962 printf("%s: could not flush Tx path (error %d)\n", 5963 DEVNAME(sc), err); 5964 goto done; 5965 } 5966 5967 /* 5968 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the 5969 * fc drive rand has has been replaced in OpenBSD. 5970 */ 5971 5972 err = iwx_drain_sta(sc, in, 0); 5973 done: 5974 sc->sc_flags &= ~IWX_FLAG_TXFLUSH; 5975 return err; 5976 } 5977 5978 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25 5979 5980 static int 5981 iwx_beacon_filter_send_cmd(struct iwx_softc *sc, 5982 struct iwx_beacon_filter_cmd *cmd) 5983 { 5984 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD, 5985 0, sizeof(struct iwx_beacon_filter_cmd), cmd); 5986 } 5987 5988 static int 5989 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable) 5990 { 5991 struct iwx_beacon_filter_cmd cmd = { 5992 IWX_BF_CMD_CONFIG_DEFAULTS, 5993 .bf_enable_beacon_filter = htole32(1), 5994 .ba_enable_beacon_abort = htole32(enable), 5995 }; 5996 5997 if (!sc->sc_bf.bf_enabled) 5998 return 0; 5999 6000 sc->sc_bf.ba_enabled = enable; 6001 return iwx_beacon_filter_send_cmd(sc, &cmd); 6002 } 6003 6004 static void 6005 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in, 6006 struct iwx_mac_power_cmd *cmd) 6007 { 6008 struct ieee80211com *ic = &sc->sc_ic; 6009 struct ieee80211_node *ni = &in->in_ni; 6010 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6011 int dtim_period, dtim_msec, keep_alive; 6012 6013 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 6014 in->in_color)); 6015 if (vap->iv_dtim_period) 6016 dtim_period = vap->iv_dtim_period; 6017 else 6018 dtim_period = 1; 6019 6020 /* 6021 * Regardless of power management state the driver must set 6022 * keep alive period. FW will use it for sending keep alive NDPs 6023 * immediately after association. Check that keep alive period 6024 * is at least 3 * DTIM. 
6025 */ 6026 dtim_msec = dtim_period * ni->ni_intval; 6027 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC); 6028 keep_alive = roundup(keep_alive, 1000) / 1000; 6029 cmd->keep_alive_seconds = htole16(keep_alive); 6030 6031 if (ic->ic_opmode != IEEE80211_M_MONITOR) 6032 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK); 6033 } 6034 6035 static int 6036 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in) 6037 { 6038 int err; 6039 int ba_enable; 6040 struct iwx_mac_power_cmd cmd; 6041 6042 memset(&cmd, 0, sizeof(cmd)); 6043 6044 iwx_power_build_cmd(sc, in, &cmd); 6045 6046 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0, 6047 sizeof(cmd), &cmd); 6048 if (err != 0) 6049 return err; 6050 6051 ba_enable = !!(cmd.flags & 6052 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)); 6053 return iwx_update_beacon_abort(sc, in, ba_enable); 6054 } 6055 6056 static int 6057 iwx_power_update_device(struct iwx_softc *sc) 6058 { 6059 struct iwx_device_power_cmd cmd = { }; 6060 struct ieee80211com *ic = &sc->sc_ic; 6061 6062 if (ic->ic_opmode != IEEE80211_M_MONITOR) 6063 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); 6064 6065 return iwx_send_cmd_pdu(sc, 6066 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd); 6067 } 6068 #if 0 6069 static int 6070 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in) 6071 { 6072 struct iwx_beacon_filter_cmd cmd = { 6073 IWX_BF_CMD_CONFIG_DEFAULTS, 6074 .bf_enable_beacon_filter = htole32(1), 6075 .ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled), 6076 }; 6077 int err; 6078 6079 err = iwx_beacon_filter_send_cmd(sc, &cmd); 6080 if (err == 0) 6081 sc->sc_bf.bf_enabled = 1; 6082 6083 return err; 6084 } 6085 #endif 6086 static int 6087 iwx_disable_beacon_filter(struct iwx_softc *sc) 6088 { 6089 struct iwx_beacon_filter_cmd cmd; 6090 int err; 6091 6092 memset(&cmd, 0, sizeof(cmd)); 6093 6094 err = iwx_beacon_filter_send_cmd(sc, &cmd); 6095 if (err == 0) 6096 sc->sc_bf.bf_enabled = 0; 6097 6098 return err; 6099 } 6100 6101 static int 6102 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update) 6103 { 6104 struct iwx_add_sta_cmd add_sta_cmd; 6105 int err, i; 6106 uint32_t status, aggsize; 6107 const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >> 6108 IWX_STA_FLG_MAX_AGG_SIZE_SHIFT); 6109 struct ieee80211com *ic = &sc->sc_ic; 6110 struct ieee80211_node *ni = &in->in_ni; 6111 struct ieee80211_htrateset *htrs = &ni->ni_htrates; 6112 6113 if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE)) 6114 panic("STA already added"); 6115 6116 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd)); 6117 6118 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6119 add_sta_cmd.sta_id = IWX_MONITOR_STA_ID; 6120 add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE; 6121 } else { 6122 add_sta_cmd.sta_id = IWX_STATION_ID; 6123 add_sta_cmd.station_type = IWX_STA_LINK; 6124 } 6125 add_sta_cmd.mac_id_n_color 6126 = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color)); 6127 if (!update) { 6128 if (ic->ic_opmode == IEEE80211_M_MONITOR) 6129 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, 6130 etheranyaddr); 6131 else 6132 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, 6133 in->in_macaddr); 6134 } 6135 DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__, 6136 ether_sprintf(add_sta_cmd.addr))); 6137 add_sta_cmd.add_modify = update ? 
1 : 0; 6138 add_sta_cmd.station_flags_msk 6139 |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK); 6140 6141 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) { 6142 add_sta_cmd.station_flags_msk 6143 |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK | 6144 IWX_STA_FLG_AGG_MPDU_DENS_MSK); 6145 6146 if (iwx_mimo_enabled(sc)) { 6147 if (ni->ni_flags & IEEE80211_NODE_VHT) { 6148 add_sta_cmd.station_flags |= 6149 htole32(IWX_STA_FLG_MIMO_EN_MIMO2); 6150 } else { 6151 int hasmimo = 0; 6152 for (i = 0; i < htrs->rs_nrates; i++) { 6153 if (htrs->rs_rates[i] > 7) { 6154 hasmimo = 1; 6155 break; 6156 } 6157 } 6158 if (hasmimo) { 6159 add_sta_cmd.station_flags |= 6160 htole32(IWX_STA_FLG_MIMO_EN_MIMO2); 6161 } 6162 } 6163 } 6164 6165 if (ni->ni_flags & IEEE80211_NODE_HT && 6166 IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 6167 add_sta_cmd.station_flags |= htole32( 6168 IWX_STA_FLG_FAT_EN_40MHZ); 6169 } 6170 6171 6172 if (ni->ni_flags & IEEE80211_NODE_VHT) { 6173 if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) { 6174 add_sta_cmd.station_flags |= htole32( 6175 IWX_STA_FLG_FAT_EN_80MHZ); 6176 } 6177 // XXX-misha: TODO get real ampdu size 6178 aggsize = max_aggsize; 6179 } else { 6180 aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam), 6181 IEEE80211_HTCAP_MAXRXAMPDU); 6182 } 6183 6184 if (aggsize > max_aggsize) 6185 aggsize = max_aggsize; 6186 add_sta_cmd.station_flags |= htole32((aggsize << 6187 IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) & 6188 IWX_STA_FLG_MAX_AGG_SIZE_MSK); 6189 6190 switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam), 6191 IEEE80211_HTCAP_MPDUDENSITY)) { 6192 case IEEE80211_HTCAP_MPDUDENSITY_2: 6193 add_sta_cmd.station_flags 6194 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US); 6195 break; 6196 case IEEE80211_HTCAP_MPDUDENSITY_4: 6197 add_sta_cmd.station_flags 6198 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US); 6199 break; 6200 case IEEE80211_HTCAP_MPDUDENSITY_8: 6201 add_sta_cmd.station_flags 6202 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US); 6203 break; 6204 case IEEE80211_HTCAP_MPDUDENSITY_16: 6205 add_sta_cmd.station_flags 6206 |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US); 6207 break; 6208 default: 6209 break; 6210 } 6211 } 6212 6213 status = IWX_ADD_STA_SUCCESS; 6214 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd), 6215 &add_sta_cmd, &status); 6216 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) 6217 err = EIO; 6218 6219 return err; 6220 } 6221 6222 static int 6223 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in) 6224 { 6225 struct ieee80211com *ic = &sc->sc_ic; 6226 struct iwx_rm_sta_cmd rm_sta_cmd; 6227 int err; 6228 6229 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0) 6230 panic("sta already removed"); 6231 6232 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 6233 if (ic->ic_opmode == IEEE80211_M_MONITOR) 6234 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID; 6235 else 6236 rm_sta_cmd.sta_id = IWX_STATION_ID; 6237 6238 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd), 6239 &rm_sta_cmd); 6240 6241 return err; 6242 } 6243 6244 static int 6245 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in) 6246 { 6247 int err, i, cmd_ver; 6248 6249 err = iwx_flush_sta(sc, in); 6250 if (err) { 6251 printf("%s: could not flush Tx path (error %d)\n", 6252 DEVNAME(sc), err); 6253 return err; 6254 } 6255 6256 /* 6257 * New SCD_QUEUE_CONFIG API requires explicit queue removal 6258 * before a station gets removed. 
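 * The teardown order below matters: the management queue first, then
 * any still-enabled aggregation queues, and only then the
 * IWX_REMOVE_STA command itself.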
6259 */ 6260 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 6261 IWX_SCD_QUEUE_CONFIG_CMD); 6262 if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) { 6263 err = iwx_disable_mgmt_queue(sc); 6264 if (err) 6265 return err; 6266 for (i = IWX_FIRST_AGG_TX_QUEUE; 6267 i < IWX_LAST_AGG_TX_QUEUE; i++) { 6268 struct iwx_tx_ring *ring = &sc->txq[i]; 6269 if ((sc->qenablemsk & (1 << i)) == 0) 6270 continue; 6271 err = iwx_disable_txq(sc, IWX_STATION_ID, 6272 ring->qid, ring->tid); 6273 if (err) { 6274 printf("%s: could not disable Tx queue %d " 6275 "(error %d)\n", DEVNAME(sc), ring->qid, 6276 err); 6277 return err; 6278 } 6279 } 6280 } 6281 6282 err = iwx_rm_sta_cmd(sc, in); 6283 if (err) { 6284 printf("%s: could not remove STA (error %d)\n", 6285 DEVNAME(sc), err); 6286 return err; 6287 } 6288 6289 in->in_flags = 0; 6290 6291 sc->sc_rx_ba_sessions = 0; 6292 sc->ba_rx.start_tidmask = 0; 6293 sc->ba_rx.stop_tidmask = 0; 6294 memset(sc->aggqid, 0, sizeof(sc->aggqid)); 6295 sc->ba_tx.start_tidmask = 0; 6296 sc->ba_tx.stop_tidmask = 0; 6297 for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++) 6298 sc->qenablemsk &= ~(1 << i); 6299 6300 #if 0 6301 for (i = 0; i < IEEE80211_NUM_TID; i++) { 6302 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i]; 6303 if (ba->ba_state != IEEE80211_BA_AGREED) 6304 continue; 6305 ieee80211_delba_request(ic, ni, 0, 1, i); 6306 } 6307 #endif 6308 /* Clear ampdu rx state (GOS-1525) */ 6309 for (i = 0; i < IWX_MAX_TID_COUNT; i++) { 6310 struct iwx_rx_ba *ba = &sc->ni_rx_ba[i]; 6311 ba->ba_flags = 0; 6312 } 6313 6314 return 0; 6315 } 6316 6317 static uint8_t 6318 iwx_umac_scan_fill_channels(struct iwx_softc *sc, 6319 struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems, 6320 int n_ssids, uint32_t channel_cfg_flags) 6321 { 6322 struct ieee80211com *ic = &sc->sc_ic; 6323 struct ieee80211_scan_state *ss = ic->ic_scan; 6324 struct ieee80211_channel *c; 6325 uint8_t nchan; 6326 int j; 6327 6328 for (nchan = j = 0; 6329 j < ss->ss_last && 6330 nchan < sc->sc_capa_n_scan_channels; 6331 j++) { 6332 uint8_t channel_num; 6333 6334 c = ss->ss_chans[j]; 6335 channel_num = ieee80211_mhz2ieee(c->ic_freq, 0); 6336 if (isset(sc->sc_ucode_api, 6337 IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) { 6338 chan->v2.channel_num = channel_num; 6339 if (IEEE80211_IS_CHAN_2GHZ(c)) 6340 chan->v2.band = IWX_PHY_BAND_24; 6341 else 6342 chan->v2.band = IWX_PHY_BAND_5; 6343 chan->v2.iter_count = 1; 6344 chan->v2.iter_interval = 0; 6345 } else { 6346 chan->v1.channel_num = channel_num; 6347 chan->v1.iter_count = 1; 6348 chan->v1.iter_interval = htole16(0); 6349 } 6350 chan->flags |= htole32(channel_cfg_flags); 6351 chan++; 6352 nchan++; 6353 } 6354 6355 return nchan; 6356 } 6357 6358 static int 6359 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq) 6360 { 6361 struct ieee80211com *ic = &sc->sc_ic; 6362 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6363 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf; 6364 struct ieee80211_rateset *rs; 6365 size_t remain = sizeof(preq->buf); 6366 uint8_t *frm, *pos; 6367 6368 memset(preq, 0, sizeof(*preq)); 6369 6370 if (remain < sizeof(*wh) + 2) 6371 return ENOBUFS; 6372 6373 /* 6374 * Build a probe request frame. Most of the following code is a 6375 * copy & paste of what is done in net80211. 
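 *
 * The resulting layout in preq->buf is roughly:
 *
 *   [802.11 mgmt header][SSID IE (empty; SSID inserted by hardware)]
 *   [2 GHz rates/xrates (+DS params) IEs][5 GHz rates/xrates (+VHT) IEs]
 *   [common HT IEs]
 *
 * with preq->mac_header, preq->band_data[] and preq->common_data
 * pointing firmware at the corresponding regions.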
6376 */ 6377 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6378 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6379 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6380 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 6381 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr); 6382 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 6383 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6384 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6385 6386 frm = (uint8_t *)(wh + 1); 6387 *frm++ = IEEE80211_ELEMID_SSID; 6388 *frm++ = 0; 6389 /* hardware inserts SSID */ 6390 6391 /* Tell the firmware where the MAC header is. */ 6392 preq->mac_header.offset = 0; 6393 preq->mac_header.len = htole16(frm - (uint8_t *)wh); 6394 remain -= frm - (uint8_t *)wh; 6395 6396 /* Fill in 2GHz IEs and tell firmware where they are. */ 6397 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6398 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 6399 if (remain < 4 + rs->rs_nrates) 6400 return ENOBUFS; 6401 } else if (remain < 2 + rs->rs_nrates) 6402 return ENOBUFS; 6403 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh); 6404 pos = frm; 6405 frm = ieee80211_add_rates(frm, rs); 6406 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6407 frm = ieee80211_add_xrates(frm, rs); 6408 remain -= frm - pos; 6409 6410 if (isset(sc->sc_enabled_capa, 6411 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) { 6412 if (remain < 3) 6413 return ENOBUFS; 6414 *frm++ = IEEE80211_ELEMID_DSPARMS; 6415 *frm++ = 1; 6416 *frm++ = 0; 6417 remain -= 3; 6418 } 6419 preq->band_data[0].len = htole16(frm - pos); 6420 6421 if (sc->sc_nvm.sku_cap_band_52GHz_enable) { 6422 /* Fill in 5GHz IEs. */ 6423 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6424 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 6425 if (remain < 4 + rs->rs_nrates) 6426 return ENOBUFS; 6427 } else if (remain < 2 + rs->rs_nrates) 6428 return ENOBUFS; 6429 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh); 6430 pos = frm; 6431 frm = ieee80211_add_rates(frm, rs); 6432 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6433 frm = ieee80211_add_xrates(frm, rs); 6434 preq->band_data[1].len = htole16(frm - pos); 6435 remain -= frm - pos; 6436 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) { 6437 if (remain < 14) 6438 return ENOBUFS; 6439 frm = ieee80211_add_vhtcap(frm, vap->iv_bss); 6440 remain -= frm - pos; 6441 preq->band_data[1].len = htole16(frm - pos); 6442 } 6443 } 6444 6445 /* Send 11n IEs on both 2GHz and 5GHz bands. */ 6446 preq->common_data.offset = htole16(frm - (uint8_t *)wh); 6447 pos = frm; 6448 if (vap->iv_flags_ht & IEEE80211_FHT_HT) { 6449 if (remain < 28) 6450 return ENOBUFS; 6451 frm = ieee80211_add_htcap(frm, vap->iv_bss); 6452 /* XXX add WME info? */ 6453 remain -= frm - pos; 6454 } 6455 6456 preq->common_data.len = htole16(frm - pos); 6457 6458 return 0; 6459 } 6460 6461 static int 6462 iwx_config_umac_scan_reduced(struct iwx_softc *sc) 6463 { 6464 struct iwx_scan_config scan_cfg; 6465 struct iwx_host_cmd hcmd = { 6466 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0), 6467 .len[0] = sizeof(scan_cfg), 6468 .data[0] = &scan_cfg, 6469 .flags = 0, 6470 }; 6471 int cmdver; 6472 6473 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) { 6474 printf("%s: firmware does not support reduced scan config\n", 6475 DEVNAME(sc)); 6476 return ENOTSUP; 6477 } 6478 6479 memset(&scan_cfg, 0, sizeof(scan_cfg)); 6480 6481 /* 6482 * SCAN_CFG version >= 5 implies that the broadcast 6483 * STA ID field is deprecated. 
6484 */ 6485 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD); 6486 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5) 6487 scan_cfg.bcast_sta_id = 0xff; 6488 6489 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc)); 6490 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc)); 6491 6492 return iwx_send_cmd(sc, &hcmd); 6493 } 6494 6495 static uint16_t 6496 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan) 6497 { 6498 struct ieee80211com *ic = &sc->sc_ic; 6499 struct ieee80211_scan_state *ss = ic->ic_scan; 6500 uint16_t flags = 0; 6501 6502 if (ss->ss_nssid == 0) { 6503 DPRINTF(("%s: Passive scan started\n", __func__)); 6504 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE; 6505 } 6506 6507 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL; 6508 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE; 6509 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL; 6510 6511 return flags; 6512 } 6513 6514 #define IWX_SCAN_DWELL_ACTIVE 10 6515 #define IWX_SCAN_DWELL_PASSIVE 110 6516 6517 /* adaptive dwell max budget time [TU] for full scan */ 6518 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 6519 /* adaptive dwell max budget time [TU] for directed scan */ 6520 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 6521 /* adaptive dwell default high band APs number */ 6522 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8 6523 /* adaptive dwell default low band APs number */ 6524 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2 6525 /* adaptive dwell default APs number in social channels (1, 6, 11) */ 6526 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 6527 /* adaptive dwell number of APs override for p2p friendly GO channels */ 6528 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10 6529 /* adaptive dwell number of APs override for social channels */ 6530 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2 6531 6532 static void 6533 iwx_scan_umac_dwell_v10(struct iwx_softc *sc, 6534 struct iwx_scan_general_params_v10 *general_params, int bgscan) 6535 { 6536 uint32_t suspend_time, max_out_time; 6537 uint8_t active_dwell, passive_dwell; 6538 6539 active_dwell = IWX_SCAN_DWELL_ACTIVE; 6540 passive_dwell = IWX_SCAN_DWELL_PASSIVE; 6541 6542 general_params->adwell_default_social_chn = 6543 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; 6544 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS; 6545 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS; 6546 6547 if (bgscan) 6548 general_params->adwell_max_budget = 6549 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); 6550 else 6551 general_params->adwell_max_budget = 6552 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); 6553 6554 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6); 6555 if (bgscan) { 6556 max_out_time = htole32(120); 6557 suspend_time = htole32(120); 6558 } else { 6559 max_out_time = htole32(0); 6560 suspend_time = htole32(0); 6561 } 6562 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] = 6563 htole32(max_out_time); 6564 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] = 6565 htole32(suspend_time); 6566 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] = 6567 htole32(max_out_time); 6568 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] = 6569 htole32(suspend_time); 6570 6571 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell; 6572 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell; 6573 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell; 6574 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell; 6575 } 6576 6577 static void 6578 
iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc, 6579 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan) 6580 { 6581 iwx_scan_umac_dwell_v10(sc, gp, bgscan); 6582 6583 gp->flags = htole16(gen_flags); 6584 6585 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1) 6586 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3; 6587 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) 6588 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3; 6589 6590 gp->scan_start_mac_id = 0; 6591 } 6592 6593 static void 6594 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc, 6595 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags, 6596 int n_ssid) 6597 { 6598 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER; 6599 6600 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config, 6601 nitems(cp->channel_config), n_ssid, channel_cfg_flags); 6602 6603 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY; 6604 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS; 6605 } 6606 6607 static int 6608 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan) 6609 { 6610 struct ieee80211com *ic = &sc->sc_ic; 6611 struct ieee80211_scan_state *ss = ic->ic_scan; 6612 struct iwx_host_cmd hcmd = { 6613 .id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0), 6614 .len = { 0, }, 6615 .data = { NULL, }, 6616 .flags = 0, 6617 }; 6618 struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd; 6619 struct iwx_scan_req_params_v14 *scan_p; 6620 int err, async = bgscan, n_ssid = 0; 6621 uint16_t gen_flags; 6622 uint32_t bitmap_ssid = 0; 6623 6624 IWX_ASSERT_LOCKED(sc); 6625 6626 bzero(cmd, sizeof(struct iwx_scan_req_umac_v14)); 6627 6628 scan_p = &cmd->scan_params; 6629 6630 cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6); 6631 cmd->uid = htole32(0); 6632 6633 gen_flags = iwx_scan_umac_flags_v2(sc, bgscan); 6634 iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params, 6635 gen_flags, bgscan); 6636 6637 scan_p->periodic_params.schedule[0].interval = htole16(0); 6638 scan_p->periodic_params.schedule[0].iter_count = 1; 6639 6640 err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq); 6641 if (err) { 6642 printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__, 6643 err); 6644 return err; 6645 } 6646 6647 for (int i=0; i < ss->ss_nssid; i++) { 6648 scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID; 6649 scan_p->probe_params.direct_scan[i].len = 6650 MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 6651 DPRINTF(("%s: Active scan started for ssid ", __func__)); 6652 memcpy(scan_p->probe_params.direct_scan[i].ssid, 6653 ss->ss_ssid[i].ssid, ss->ss_ssid[i].len); 6654 n_ssid++; 6655 bitmap_ssid |= (1 << i); 6656 } 6657 DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid)); 6658 6659 iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid, 6660 n_ssid); 6661 6662 hcmd.len[0] = sizeof(*cmd); 6663 hcmd.data[0] = (void *)cmd; 6664 hcmd.flags |= async ? IWX_CMD_ASYNC : 0; 6665 6666 err = iwx_send_cmd(sc, &hcmd); 6667 return err; 6668 } 6669 6670 static void 6671 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif) 6672 { 6673 char alpha2[3]; 6674 6675 snprintf(alpha2, sizeof(alpha2), "%c%c", 6676 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff); 6677 6678 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' " 6679 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc)); 6680 6681 /* TODO: Schedule a task to send MCC_UPDATE_CMD? 
*/ 6682 } 6683 6684 uint8_t 6685 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx) 6686 { 6687 int i; 6688 uint8_t rval; 6689 6690 for (i = 0; i < rs->rs_nrates; i++) { 6691 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 6692 if (rval == iwx_rates[ridx].rate) 6693 return rs->rs_rates[i]; 6694 } 6695 6696 return 0; 6697 } 6698 6699 static int 6700 iwx_rval2ridx(int rval) 6701 { 6702 int ridx; 6703 6704 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) { 6705 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP) 6706 continue; 6707 if (rval == iwx_rates[ridx].rate) 6708 break; 6709 } 6710 6711 return ridx; 6712 } 6713 6714 static void 6715 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates, 6716 int *ofdm_rates) 6717 { 6718 struct ieee80211_node *ni = &in->in_ni; 6719 struct ieee80211_rateset *rs = &ni->ni_rates; 6720 int lowest_present_ofdm = -1; 6721 int lowest_present_cck = -1; 6722 uint8_t cck = 0; 6723 uint8_t ofdm = 0; 6724 int i; 6725 6726 if (ni->ni_chan == IEEE80211_CHAN_ANYC || 6727 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) { 6728 for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) { 6729 if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0) 6730 continue; 6731 cck |= (1 << i); 6732 if (lowest_present_cck == -1 || lowest_present_cck > i) 6733 lowest_present_cck = i; 6734 } 6735 } 6736 for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) { 6737 if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0) 6738 continue; 6739 ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE)); 6740 if (lowest_present_ofdm == -1 || lowest_present_ofdm > i) 6741 lowest_present_ofdm = i; 6742 } 6743 6744 /* 6745 * Now we've got the basic rates as bitmaps in the ofdm and cck 6746 * variables. This isn't sufficient though, as there might not 6747 * be all the right rates in the bitmap. E.g. if the only basic 6748 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps 6749 * and 6 Mbps because the 802.11-2007 standard says in 9.6: 6750 * 6751 * [...] a STA responding to a received frame shall transmit 6752 * its Control Response frame [...] at the highest rate in the 6753 * BSSBasicRateSet parameter that is less than or equal to the 6754 * rate of the immediately previous frame in the frame exchange 6755 * sequence ([...]) and that is of the same modulation class 6756 * ([...]) as the received frame. If no rate contained in the 6757 * BSSBasicRateSet parameter meets these conditions, then the 6758 * control frame sent in response to a received frame shall be 6759 * transmitted at the highest mandatory rate of the PHY that is 6760 * less than or equal to the rate of the received frame, and 6761 * that is of the same modulation class as the received frame. 6762 * 6763 * As a consequence, we need to add all mandatory rates that are 6764 * lower than all of the basic rates to these bitmaps. 6765 */ 6766 6767 if (IWX_RATE_24M_INDEX < lowest_present_ofdm) 6768 ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE; 6769 if (IWX_RATE_12M_INDEX < lowest_present_ofdm) 6770 ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE; 6771 /* 6M already there or needed so always add */ 6772 ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE; 6773 6774 /* 6775 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. 
6776 * Note, however: 6777 * - if no CCK rates are basic, it must be ERP since there must 6778 * be some basic rates at all, so they're OFDM => ERP PHY 6779 * (or we're in 5 GHz, and the cck bitmap will never be used) 6780 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M 6781 * - if 5.5M is basic, 1M and 2M are mandatory 6782 * - if 2M is basic, 1M is mandatory 6783 * - if 1M is basic, that's the only valid ACK rate. 6784 * As a consequence, it's not as complicated as it sounds, just add 6785 * any lower rates to the ACK rate bitmap. 6786 */ 6787 if (IWX_RATE_11M_INDEX < lowest_present_cck) 6788 cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE; 6789 if (IWX_RATE_5M_INDEX < lowest_present_cck) 6790 cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE; 6791 if (IWX_RATE_2M_INDEX < lowest_present_cck) 6792 cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE; 6793 /* 1M already there or needed so always add */ 6794 cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE; 6795 6796 *cck_rates = cck; 6797 *ofdm_rates = ofdm; 6798 } 6799 6800 static void 6801 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in, 6802 struct iwx_mac_ctx_cmd *cmd, uint32_t action) 6803 { 6804 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 6805 struct ieee80211com *ic = &sc->sc_ic; 6806 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6807 struct ieee80211_node *ni = vap->iv_bss; 6808 int cck_ack_rates, ofdm_ack_rates; 6809 6810 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, 6811 in->in_color)); 6812 cmd->action = htole32(action); 6813 6814 if (action == IWX_FW_CTXT_ACTION_REMOVE) 6815 return; 6816 6817 if (ic->ic_opmode == IEEE80211_M_MONITOR) 6818 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER); 6819 else if (ic->ic_opmode == IEEE80211_M_STA) 6820 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA); 6821 else 6822 panic("unsupported operating mode %d", ic->ic_opmode); 6823 cmd->tsf_id = htole32(IWX_TSF_ID_A); 6824 6825 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr); 6826 DPRINTF(("%s: cmd->node_addr=%s\n", __func__, 6827 ether_sprintf(cmd->node_addr))); 6828 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6829 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr); 6830 return; 6831 } 6832 6833 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr); 6834 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__, 6835 ether_sprintf(cmd->bssid_addr))); 6836 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates); 6837 cmd->cck_rates = htole32(cck_ack_rates); 6838 cmd->ofdm_rates = htole32(ofdm_ack_rates); 6839 6840 cmd->cck_short_preamble 6841 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6842 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0); 6843 cmd->short_slot 6844 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT) 6845 ? 
IWX_MAC_FLG_SHORT_SLOT : 0);
6846
6847     struct chanAccParams chp;
6848     ieee80211_wme_vap_getparams(vap, &chp);
6849
6850     for (int i = 0; i < WME_NUM_AC; i++) {
6851         int txf = iwx_ac_to_tx_fifo[i];
6852         cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6853         cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6854         cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6855         cmd->ac[txf].fifos_mask = (1 << txf);
6856         /* wmep_txopLimit is in units of 32 microseconds. */
6857         cmd->ac[txf].edca_txop =
6858             htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6859     }
6860
6861     if (ni->ni_flags & IEEE80211_NODE_QOS) {
6862         DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6863         cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6864     }
6865
6866     if (ni->ni_flags & IEEE80211_NODE_HT) {
6867         switch (vap->iv_curhtprotmode) {
6868         case IEEE80211_HTINFO_OPMODE_PURE:
6869             break;
6870         case IEEE80211_HTINFO_OPMODE_PROTOPT:
6871         case IEEE80211_HTINFO_OPMODE_MIXED:
6872             cmd->protection_flags |=
6873                 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6874                 IWX_MAC_PROT_FLG_FAT_PROT);
6875             break;
6876         case IEEE80211_HTINFO_OPMODE_HT20PR:
6877             if (in->in_phyctxt &&
6878                 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6879                 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6880                 cmd->protection_flags |=
6881                     htole32(IWX_MAC_PROT_FLG_HT_PROT |
6882                     IWX_MAC_PROT_FLG_FAT_PROT);
6883             }
6884             break;
6885         default:
6886             break;
6887         }
6888         cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6889         DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6890     }
6891
6892     if (ic->ic_flags & IEEE80211_F_USEPROT)
6893         cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6894     cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6895 #undef IWX_EXP2
6896 }
6897
6898 static void
6899 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6900     struct iwx_mac_data_sta *sta, int assoc)
6901 {
6902     struct ieee80211_node *ni = &in->in_ni;
6903     struct ieee80211com *ic = &sc->sc_ic;
6904     struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6905     uint32_t dtim_off;
6906     uint64_t tsf;
6907     int dtim_period;
6908
6909     dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6910     tsf = le64toh(ni->ni_tstamp.tsf);
6911     dtim_period = vap->iv_dtim_period;
6912
6913     sta->is_assoc = htole32(assoc);
6914
6915     if (assoc) {
6916         sta->dtim_time = htole32(tsf + dtim_off);
6917         sta->dtim_tsf = htole64(tsf + dtim_off);
6918         // XXX: unset in iwm
6919         sta->assoc_beacon_arrive_time = 0;
6920     }
6921     sta->bi = htole32(ni->ni_intval);
6922     sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6923     sta->data_policy = htole32(0);
6924     sta->listen_interval = htole32(10);
6925     sta->assoc_id = htole32(ni->ni_associd);
6926 }
6927
6928 static int
6929 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6930     int assoc)
6931 {
6932     struct ieee80211com *ic = &sc->sc_ic;
6933     struct ieee80211_node *ni = &in->in_ni;
6934     struct iwx_mac_ctx_cmd cmd;
6935     int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6936
6937     if (action == IWX_FW_CTXT_ACTION_ADD && active)
6938         panic("MAC already added");
6939     if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6940         panic("MAC already removed");
6941
6942     memset(&cmd, 0, sizeof(cmd));
6943
6944     iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6945
6946     if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6947         return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6948             sizeof(cmd), &cmd);
6949     }
6950
6951     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
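        /*
         * In monitor mode, ask firmware to pass everything up:
         * control and management frames, group-addressed frames,
         * beacons, probe requests, and frames with bad CRC.
         */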
6952 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC | 6953 IWX_MAC_FILTER_IN_CONTROL_AND_MGMT | 6954 IWX_MAC_FILTER_ACCEPT_GRP | 6955 IWX_MAC_FILTER_IN_BEACON | 6956 IWX_MAC_FILTER_IN_PROBE_REQUEST | 6957 IWX_MAC_FILTER_IN_CRC32); 6958 // XXX: dtim period is in vap 6959 } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) { 6960 /* 6961 * Allow beacons to pass through as long as we are not 6962 * associated or we do not have dtim period information. 6963 */ 6964 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON); 6965 } 6966 iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc); 6967 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd); 6968 } 6969 6970 static int 6971 iwx_clear_statistics(struct iwx_softc *sc) 6972 { 6973 struct iwx_statistics_cmd scmd = { 6974 .flags = htole32(IWX_STATISTICS_FLG_CLEAR) 6975 }; 6976 struct iwx_host_cmd cmd = { 6977 .id = IWX_STATISTICS_CMD, 6978 .len[0] = sizeof(scmd), 6979 .data[0] = &scmd, 6980 .flags = IWX_CMD_WANT_RESP, 6981 .resp_pkt_len = sizeof(struct iwx_notif_statistics), 6982 }; 6983 int err; 6984 6985 err = iwx_send_cmd(sc, &cmd); 6986 if (err) 6987 return err; 6988 6989 iwx_free_resp(sc, &cmd); 6990 return 0; 6991 } 6992 6993 static int 6994 iwx_scan(struct iwx_softc *sc) 6995 { 6996 int err; 6997 err = iwx_umac_scan_v14(sc, 0); 6998 6999 if (err) { 7000 printf("%s: could not initiate scan\n", DEVNAME(sc)); 7001 return err; 7002 } 7003 return 0; 7004 } 7005 7006 static int 7007 iwx_bgscan(struct ieee80211com *ic) 7008 { 7009 struct iwx_softc *sc = ic->ic_softc; 7010 int err; 7011 7012 err = iwx_umac_scan_v14(sc, 1); 7013 if (err) { 7014 printf("%s: could not initiate scan\n", DEVNAME(sc)); 7015 return err; 7016 } 7017 return 0; 7018 } 7019 7020 static int 7021 iwx_enable_mgmt_queue(struct iwx_softc *sc) 7022 { 7023 int err; 7024 7025 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1; 7026 7027 /* 7028 * Non-QoS frames use the "MGMT" TID and queue. 7029 * Other TIDs and data queues are reserved for QoS data frames. 7030 */ 7031 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid, 7032 IWX_MGMT_TID, IWX_TX_RING_COUNT); 7033 if (err) { 7034 printf("%s: could not enable Tx queue %d (error %d)\n", 7035 DEVNAME(sc), sc->first_data_qid, err); 7036 return err; 7037 } 7038 7039 return 0; 7040 } 7041 7042 static int 7043 iwx_disable_mgmt_queue(struct iwx_softc *sc) 7044 { 7045 int err, cmd_ver; 7046 7047 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */ 7048 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP, 7049 IWX_SCD_QUEUE_CONFIG_CMD); 7050 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) 7051 return 0; 7052 7053 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1; 7054 7055 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid, 7056 IWX_MGMT_TID); 7057 if (err) { 7058 printf("%s: could not disable Tx queue %d (error %d)\n", 7059 DEVNAME(sc), sc->first_data_qid, err); 7060 return err; 7061 } 7062 7063 return 0; 7064 } 7065 7066 static int 7067 iwx_rs_rval2idx(uint8_t rval) 7068 { 7069 /* Firmware expects indices which match our 11g rate set. 
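 * For reference, the standard 11g rate set orders its entries as
 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54 Mb/s, so index 0 maps
 * to 1 Mb/s and index 11 to 54 Mb/s.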
*/
7070     const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7071     int i;
7072
7073     for (i = 0; i < rs->rs_nrates; i++) {
7074         if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7075             return i;
7076     }
7077
7078     return -1;
7079 }
7080
7081 static uint16_t
7082 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7083 {
7084     uint16_t htrates = 0;
7085     struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7086     int i;
7087
7088     if (rsidx == IEEE80211_HT_RATESET_SISO) {
7089         for (i = 0; i < htrs->rs_nrates; i++) {
7090             if (htrs->rs_rates[i] <= 7)
7091                 htrates |= (1 << htrs->rs_rates[i]);
7092         }
7093     } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7094         for (i = 0; i < htrs->rs_nrates; i++) {
7095             if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7096                 htrates |= (1 << (htrs->rs_rates[i] - 8));
7097         }
7098     } else
7099         panic("iwx_rs_ht_rates");
7100
7101     IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7102         "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7103
7104     return htrates;
7105 }
7106
7107 uint16_t
7108 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7109 {
7110     uint16_t rx_mcs;
7111     int max_mcs = -1;
7112 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n)    (0x3 << (2*((n)-1)))
7113 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)   (2*((n)-1))
7114     rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7115         IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7116         IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7117
7118     switch (rx_mcs) {
7119     case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7120         break;
7121     case IEEE80211_VHT_MCS_SUPPORT_0_7:
7122         max_mcs = 7;
7123         break;
7124     case IEEE80211_VHT_MCS_SUPPORT_0_8:
7125         max_mcs = 8;
7126         break;
7127     case IEEE80211_VHT_MCS_SUPPORT_0_9:
7128         /* Disable VHT MCS 9 for 20MHz-only stations. */
7129         if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7130             max_mcs = 8;
7131         else
7132             max_mcs = 9;
7133         break;
7134     default:
7135         /* Should not happen; the values above cover the possible range.
*/ 7136 panic("invalid VHT Rx MCS value %u", rx_mcs); 7137 } 7138 7139 return ((1 << (max_mcs + 1)) - 1); 7140 } 7141 7142 static int 7143 iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in) 7144 { 7145 #if 1 7146 panic("iwx: Trying to init rate set on untested version"); 7147 #else 7148 struct ieee80211_node *ni = &in->in_ni; 7149 struct ieee80211_rateset *rs = &ni->ni_rates; 7150 struct iwx_tlc_config_cmd_v3 cfg_cmd; 7151 uint32_t cmd_id; 7152 int i; 7153 size_t cmd_size = sizeof(cfg_cmd); 7154 7155 memset(&cfg_cmd, 0, sizeof(cfg_cmd)); 7156 7157 for (i = 0; i < rs->rs_nrates; i++) { 7158 uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL; 7159 int idx = iwx_rs_rval2idx(rval); 7160 if (idx == -1) 7161 return EINVAL; 7162 cfg_cmd.non_ht_rates |= (1 << idx); 7163 } 7164 7165 if (ni->ni_flags & IEEE80211_NODE_VHT) { 7166 cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT; 7167 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] = 7168 htole16(iwx_rs_vht_rates(sc, ni, 1)); 7169 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] = 7170 htole16(iwx_rs_vht_rates(sc, ni, 2)); 7171 } else if (ni->ni_flags & IEEE80211_NODE_HT) { 7172 cfg_cmd.mode = IWX_TLC_MNG_MODE_HT; 7173 cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] = 7174 htole16(iwx_rs_ht_rates(sc, ni, 7175 IEEE80211_HT_RATESET_SISO)); 7176 cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] = 7177 htole16(iwx_rs_ht_rates(sc, ni, 7178 IEEE80211_HT_RATESET_MIMO2)); 7179 } else 7180 cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT; 7181 7182 cfg_cmd.sta_id = IWX_STATION_ID; 7183 if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) 7184 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ; 7185 else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA || 7186 in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB) 7187 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ; 7188 else 7189 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ; 7190 cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK; 7191 if (ni->ni_flags & IEEE80211_NODE_VHT) 7192 cfg_cmd.max_mpdu_len = htole16(3895); 7193 else 7194 cfg_cmd.max_mpdu_len = htole16(3839); 7195 if (ni->ni_flags & IEEE80211_NODE_HT) { 7196 if (ieee80211_node_supports_ht_sgi20(ni)) { 7197 cfg_cmd.sgi_ch_width_supp |= (1 << 7198 IWX_TLC_MNG_CH_WIDTH_20MHZ); 7199 } 7200 if (ieee80211_node_supports_ht_sgi40(ni)) { 7201 cfg_cmd.sgi_ch_width_supp |= (1 << 7202 IWX_TLC_MNG_CH_WIDTH_40MHZ); 7203 } 7204 } 7205 if ((ni->ni_flags & IEEE80211_NODE_VHT) && 7206 ieee80211_node_supports_vht_sgi80(ni)) 7207 cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ); 7208 7209 cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0); 7210 return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd); 7211 #endif 7212 } 7213 7214 static int 7215 iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in) 7216 { 7217 struct ieee80211_node *ni = &in->in_ni; 7218 struct ieee80211_rateset *rs = &ni->ni_rates; 7219 struct ieee80211_htrateset *htrs = &ni->ni_htrates; 7220 struct iwx_tlc_config_cmd_v4 cfg_cmd; 7221 uint32_t cmd_id; 7222 int i; 7223 int sgi80 = 0; 7224 size_t cmd_size = sizeof(cfg_cmd); 7225 7226 memset(&cfg_cmd, 0, sizeof(cfg_cmd)); 7227 7228 for (i = 0; i < rs->rs_nrates; i++) { 7229 uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL; 7230 int idx = iwx_rs_rval2idx(rval); 7231 if (idx == -1) 7232 return EINVAL; 7233 cfg_cmd.non_ht_rates |= (1 << idx); 7234 } 7235 for (i = 0; i < htrs->rs_nrates; i++) { 7236 DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i])); 7237 } 
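    /*
     * Select the TLC mode and rate bitmaps: VHT nodes get per-NSS
     * VHT MCS maps, HT nodes get SISO/MIMO2 MCS bitmaps, and
     * everything else is configured as non-HT.
     */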
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Baseline 802.11 maximum MPDU lengths: 3895 octets (VHT), 3839 (HT). */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}

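/*
 * Dispatch to the TLC configuration layout matching the firmware's
 * advertised command version.  Only the v4 layout has been exercised
 * here so far; iwx_rs_init_v3() is guarded by a panic above as a loud
 * marker for untested firmware.
 */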
static int
iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
{
	int cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_TLC_MNG_CONFIG_CMD);
	if (cmd_ver == 4)
		return iwx_rs_init_v4(sc, in);
	else
		return iwx_rs_init_v3(sc, in);
}

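/*
 * The legacy rate tables in iwx_rs_update_node_txrate() below are in
 * net80211 dot11rate units of 500 kbit/s.  For example, a firmware
 * legacy OFDM index of 3 selects ofdm_idx_to_rate[3] == 36, i.e.
 * 18 Mbit/s, and a CCK index of 3 selects cck_idx_to_rate[3] == 22,
 * i.e. 11 Mbit/s.
 */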
/**
 * @brief Turn the given TX rate control notification into an
 * ieee80211_node_txrate
 *
 * This populates the given txrate node with the TX rate control
 * notification.
 *
 * @param sc driver softc
 * @param notif firmware notification
 * @param ni ieee80211_node to update
 * @returns true if updated, false if not
 */
static bool
iwx_rs_update_node_txrate(struct iwx_softc *sc,
    const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	/* XXX TODO: create an inline function in if_iwxreg.h? */
	static int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
	static int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };

	uint32_t rate_n_flags;
	uint32_t type;

	/* Extract the rate and command version */
	rate_n_flags = le32toh(notif->rate);

	if (sc->sc_rate_n_flags_version != 2) {
		net80211_ic_printf(ic,
		    "%s: unsupported rate_n_flags version (%d)\n",
		    __func__,
		    sc->sc_rate_n_flags_version);
		return (false);
	}

	if (sc->sc_debug & IWX_DEBUG_TXRATE)
		print_ratenflags(__func__, __LINE__,
		    rate_n_flags, sc->sc_rate_n_flags_version);

	type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
	switch (type) {
	case IWX_RATE_MCS_CCK_MSK:
		ieee80211_node_set_txrate_dot11rate(ni,
		    cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
		return (true);
	case IWX_RATE_MCS_LEGACY_OFDM_MSK:
		ieee80211_node_set_txrate_dot11rate(ni,
		    ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
		return (true);
	case IWX_RATE_MCS_HT_MSK:
		/*
		 * TODO: the current API doesn't include channel width
		 * and other flags, so we can't accurately store them yet!
		 *
		 * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
		 *     >> IWX_RATE_MCS_CHAN_WIDTH_POS)
		 * LDPC: (flags & (1 << 16))
		 */
		ieee80211_node_set_txrate_ht_mcsrate(ni,
		    IWX_RATE_HT_MCS_INDEX(rate_n_flags));
		return (true);
	case IWX_RATE_MCS_VHT_MSK:
		/* TODO: same comment on channel width, etc above */
		ieee80211_node_set_txrate_vht_rate(ni,
		    IWX_RATE_VHT_MCS_CODE(rate_n_flags),
		    IWX_RATE_VHT_MCS_NSS(rate_n_flags));
		return (true);
	default:
		net80211_ic_printf(ic,
		    "%s: unsupported chosen rate type in "
		    "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
		    type >> IWX_RATE_MCS_MOD_TYPE_POS);
		return (false);
	}

	/* Default: if we get here, we didn't successfully update anything */
	return (false);
}

/**
 * @brief Process a firmware rate control update and update net80211.
 *
 * Since firmware is doing rate control, this just needs to update
 * the txrate in the ieee80211_node entry.
 */
static void
iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	/* XXX TODO: get a node ref! */
	struct ieee80211_node *ni = (void *)vap->iv_bss;

	/*
	 * For now the iwx driver only supports a single vdev with a single
	 * node; it doesn't yet support ibss/hostap/multiple vdevs.
	 */
	if (notif->sta_id != IWX_STATION_ID ||
	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
		return;

	iwx_rs_update_node_txrate(sc, notif, ni);
}

/* Send the RX chain configuration (RLC) for the given PHY context. */
static int
iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    uint8_t chains_static, uint8_t chains_dynamic)
{
	struct iwx_rlc_config_cmd cmd;
	uint32_t cmd_id;
	uint8_t active_cnt, idle_cnt;

	memset(&cmd, 0, sizeof(cmd));

	idle_cnt = chains_static;
	active_cnt = chains_dynamic;

	cmd.phy_id = htole32(phyctxt->id);
	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
	    IWX_PHY_RX_CHAIN_VALID_POS);
	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);

	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
}

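/**
 * @brief Move a PHY context to a new channel/chain configuration.
 *
 * If the firmware supports CDB and the new channel is in a different
 * band, the PHY context cannot be modified in place; it is removed and
 * re-added instead.  On success the RLC configuration is also refreshed
 * when the firmware implements IWX_RLC_CONFIG_CMD version 2.
 */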
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}

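/**
 * @brief Prepare firmware state so that association can proceed.
 *
 * Sets up a PHY context, then adds the MAC context, binding and
 * station, enables the management queue and finally "protects" the
 * session with a time event so the firmware stays on channel during
 * the handshake.  Failure paths unwind the steps already taken via
 * the labels at the end of the function.
 */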
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in->in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.  ni_intval is
	 * the beacon interval in TU.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}

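/**
 * @brief Tear down association state, in the reverse order of iwx_auth().
 *
 * Removes session protection, station, binding and MAC context, then
 * parks the now-unused PHY context on a default channel.
 */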
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/*
	 * Move the unused PHY context to a default channel.
	 * TODO: OpenBSD runs this unconditionally, but here it gets in
	 * the way of AUTH->AUTH transitions.
	 */
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}

static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}

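/**
 * @brief Undo the RUN-state setup performed by iwx_run().
 *
 * Flushes the Tx path, tears down Rx BA sessions, drops the smart-FIFO
 * state back to IWX_SF_INIT_OFF and marks the station as disassociated
 * in the MAC context.
 */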
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee80211_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}

static struct ieee80211_node *
iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return malloc(sizeof (struct iwx_node), M_80211_NODE,
	    M_NOWAIT | M_ZERO);
}

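/*
 * The key management code below is the OpenBSD implementation of
 * hardware key offload.  It relies on OpenBSD-only interfaces such as
 * splnet() and ni_port_valid, so it is kept under "#if 0" as a
 * reference for a future FreeBSD port.
 */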
#if 0
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fall back to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	return EBUSY;
}

int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}

void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}

void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fall back to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
#endif

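/*
 * iwx_newstate_sub() performs the firmware work for a net80211 state
 * transition.  It is called from iwx_newstate() with the net80211 com
 * lock dropped, presumably because the firmware commands issued here
 * sleep waiting for responses.  Downward transitions unwind RUN and
 * AUTH state before the new state is entered.
 */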
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}

//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}

static int
iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_state ostate = vap->iv_state;
	int err;

	/*
	 * Prevent attempts to transition towards the same state, unless
	 * we are scanning, in which case a SCAN -> SCAN transition
	 * triggers another scan iteration.  And AUTH -> AUTH is needed
	 * to support band-steering.
	 */
	if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
	    nstate != IEEE80211_S_AUTH)
		return 0;
	IEEE80211_UNLOCK(ic);
	err = iwx_newstate_sub(vap, nstate);
	IEEE80211_LOCK(ic);
	if (err == 0)
		err = ivp->iv_newstate(vap, nstate, arg);

	return (err);
}

static void
iwx_endscan(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
		return;

	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
	wakeup(&vap->iv_state); /* wake up iwx_newstate */
}

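/*
 * Smart-FIFO ("SF") configuration.  The firmware's smart FIFO
 * apparently batches RX DMA based on traffic to save power; the tables
 * below supply its aging and idle timeouts for each traffic scenario,
 * and iwx_sf_config() selects between the associated and unassociated
 * parameter sets.
 */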
/*
 * Aging and idle timeouts for the different possible scenarios
 * in default configuration
 */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};

/*
 * Aging and idle timeouts for the different possible scenarios
 * in single BSS MAC configuration.
 */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};

static void
iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
			struct ieee80211_htrateset *htrs = &ni->ni_htrates;
			int hasmimo = 0;
			for (i = 0; i < htrs->rs_nrates; i++) {
				if (htrs->rs_rates[i] > 7) {
					hasmimo = 1;
					break;
				}
			}
			if (hasmimo)
				watermark = IWX_SF_W_MARK_MIMO2;
			else
				watermark = IWX_SF_W_MARK_SISO;
		} else {
			watermark = IWX_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWX_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);

	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
			    htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
		    sizeof(iwx_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
		    sizeof(iwx_sf_full_timeout_def));
	}
}

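/*
 * Send a smart-FIFO state change.  IWX_SF_FULL_ON derives watermarks
 * and timeouts from the BSS node's capabilities; IWX_SF_UNINIT and
 * IWX_SF_INIT_OFF use the unassociated defaults.
 */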
static int
iwx_sf_config(struct iwx_softc *sc, int new_state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwx_sf_cfg_cmd sf_cmd = {
		.state = htole32(new_state),
	};
	int err = 0;

	switch (new_state) {
	case IWX_SF_UNINIT:
	case IWX_SF_INIT_OFF:
		iwx_fill_sf_command(sc, &sf_cmd, NULL);
		break;
	case IWX_SF_FULL_ON:
		iwx_fill_sf_command(sc, &sf_cmd, ni);
		break;
	default:
		return EINVAL;
	}

	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
	    sizeof(sf_cmd), &sf_cmd);
	return err;
}

static int
iwx_send_bt_init_conf(struct iwx_softc *sc)
{
	struct iwx_bt_coex_cmd bt_cmd;

	bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));

	bt_cmd.mode = htole32(IWX_BT_COEX_NW);
	bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
	bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;

	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
	    &bt_cmd);
}

static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}

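/*
 * The MCC (mobile country code) is two ASCII characters packed into a
 * 16-bit word, e.g. "ZZ" (presumably the world-wide domain) becomes
 * ('Z' << 8 | 'Z') == 0x5a5a.
 */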
static int
iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
{
	struct iwx_mcc_update_cmd mcc_cmd;
	struct iwx_host_cmd hcmd = {
		.id = IWX_MCC_UPDATE_CMD,
		.flags = IWX_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	struct iwx_rx_packet *pkt;
	struct iwx_mcc_update_resp *resp;
	size_t resp_len;
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;

	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len < sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	if (resp_len != sizeof(*resp) +
	    resp->n_channels * sizeof(resp->channels[0])) {
		err = EIO;
		goto out;
	}

	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x "
	    "source_id=0x%x n_channels=%u\n",
	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info,
	    resp->source_id, resp->n_channels));

out:
	iwx_free_resp(sc, &hcmd);

	return err;
}

static int
iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
{
	struct iwx_temp_report_ths_cmd cmd;
	int err;

	/*
	 * In order to give responsibility for critical-temperature-kill
	 * and TX backoff to FW we need to send an empty temperature
	 * reporting command at init time.
	 */
	memset(&cmd, 0, sizeof(cmd));

	err = iwx_send_cmd_pdu(sc,
	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
	    0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
		    DEVNAME(sc), err);

	return err;
}

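/*
 * One-time firmware bring-up: run the init ucode, then configure TX
 * antennas, BT coexistence, SoC latency, PHY contexts, PCIe LTR,
 * thermal reporting, device power, regulatory (LAR) and scanning.
 * Most failure paths land at the "err" label, which drops the NIC
 * lock taken near the top of the function.
 */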
static int
iwx_init_hw(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err = 0, i;

	err = iwx_run_init_mvm_ucode(sc, 0);
	if (err)
		return err;

	if (!iwx_nic_lock(sc))
		return EBUSY;

	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
	if (err) {
		printf("%s: could not init tx ant config (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	if (sc->sc_tx_with_siso_diversity) {
		err = iwx_send_phy_cfg_cmd(sc);
		if (err) {
			printf("%s: could not send phy config (error %d)\n",
			    DEVNAME(sc), err);
			goto err;
		}
	}

	err = iwx_send_bt_init_conf(sc);
	if (err) {
		printf("%s: could not init bt coex (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	err = iwx_send_soc_conf(sc);
	if (err) {
		printf("%s: iwx_send_soc_conf failed\n", __func__);
		goto err;
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
		err = iwx_send_dqa_cmd(sc);
		if (err) {
			printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
			    "failed (error %d)\n", __func__, err);
			goto err;
		}
	}
	// TODO phyctxt
	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
		    IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
		if (err) {
			printf("%s: could not add phy context %d (error %d)\n",
			    DEVNAME(sc), i, err);
			goto err;
		}
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RLC_CONFIG_CMD) == 2) {
			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
			if (err) {
				printf("%s: could not configure RLC for PHY "
				    "%d (error %d)\n", DEVNAME(sc), i, err);
				goto err;
			}
		}
	}

	err = iwx_config_ltr(sc);
	if (err) {
		printf("%s: PCIe LTR configuration failed (error %d)\n",
		    DEVNAME(sc), err);
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
		err = iwx_send_temp_report_ths_cmd(sc);
		if (err) {
			printf("%s: iwx_send_temp_report_ths_cmd failed\n",
			    __func__);
			goto err;
		}
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	if (sc->sc_nvm.lar_enabled) {
		err = iwx_send_update_mcc_cmd(sc, "ZZ");
		if (err) {
			printf("%s: could not init LAR (error %d)\n",
			    DEVNAME(sc), err);
			goto err;
		}
	}

	err = iwx_config_umac_scan_reduced(sc);
	if (err) {
		printf("%s: could not configure scan (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

err:
	iwx_nic_unlock(sc);
	return err;
}

/* Allow multicast from our BSSID. */
static int
iwx_allow_mcast(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct iwx_mcast_filter_cmd *cmd;
	size_t size;
	int err;

	size = roundup(sizeof(*cmd), 4);
	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cmd == NULL)
		return ENOMEM;
	cmd->filter_own = 1;
	cmd->port_id = 0;
	cmd->count = 0;
	cmd->pass_all = 1;	/* no address list; filter effectively open */
	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);

	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
	    0, size, cmd);
	free(cmd, M_DEVBUF);
	return err;
}

static int
iwx_init(struct iwx_softc *sc)
{
	int err, generation;

	generation = ++sc->sc_generation;

	iwx_preinit(sc);

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: iwx_start_hw failed\n", __func__);
		return err;
	}

	err = iwx_init_hw(sc);
	if (err) {
		if (generation == sc->sc_generation)
			iwx_stop_device(sc);
		printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
		return err;
	}

	sc->sc_flags |= IWX_FLAG_HW_INITED;
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);

	return 0;
}

static void
iwx_start(struct iwx_softc *sc)
{
	struct ieee80211_node *ni;
	struct mbuf *m;

	IWX_ASSERT_LOCKED(sc);

	while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwx_tx(sc, m, ni) != 0) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			continue;
		}
	}
}

static void
iwx_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_vap *ivp = IWX_VAP(vap);

	iwx_stop_device(sc);

	/* Reset soft state. */
	sc->sc_generation++;
	ivp->phy_ctxt = NULL;

	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;

	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
}

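/*
 * The watchdog re-arms itself once per second via callout_reset().
 * When a queue's timer expires it dumps firmware and driver state and
 * asks net80211 to restart the device, deliberately without re-arming.
 */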
static void
iwx_watchdog(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int i;

	/*
	 * We maintain a separate timer for each Tx queue because
	 * Tx aggregation queues can get "stuck" while other queues
	 * keep working.  The Linux driver uses a similar workaround.
	 */
	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
		if (sc->sc_tx_timer[i] > 0) {
			if (--sc->sc_tx_timer[i] == 0) {
				printf("%s: device timeout\n", DEVNAME(sc));

				iwx_nic_error(sc);
				iwx_dump_driver_status(sc);
				ieee80211_restart_all(ic);
				return;
			}
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
}

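/*
 * The firmware reports separate error-event tables for the LMAC and
 * UMAC cores.  iwx_nic_error() dumps the LMAC table and chains to
 * iwx_nic_umac_error() when the ALIVE response carried a UMAC table
 * pointer.
 */
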
/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion.  As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwx_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;

/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion.  As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwx_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;
	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET	(1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE		(7 * sizeof(uint32_t))

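/*
 * Error-log pointers below 0x400000 are presumably outside the range of
 * valid device SRAM addresses for these chips, so they are rejected
 * before any attempt to read the table.
 */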
"NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 8773 { "ADVANCED_SYSASSERT", 0 }, 8774 }; 8775 8776 static const char * 8777 iwx_desc_lookup(uint32_t num) 8778 { 8779 int i; 8780 8781 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 8782 if (advanced_lookup[i].num == 8783 (num & ~IWX_FW_SYSASSERT_CPU_MASK)) 8784 return advanced_lookup[i].name; 8785 8786 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 8787 return advanced_lookup[i].name; 8788 } 8789 8790 /* 8791 * Support for dumping the error log seemed like a good idea ... 8792 * but it's mostly hex junk and the only sensible thing is the 8793 * hw/ucode revision (which we know anyway). Since it's here, 8794 * I'll just leave it in, just in case e.g. the Intel guys want to 8795 * help us decipher some "ADVANCED_SYSASSERT" later. 8796 */ 8797 static void 8798 iwx_nic_error(struct iwx_softc *sc) 8799 { 8800 struct iwx_error_event_table table; 8801 uint32_t base; 8802 8803 printf("%s: dumping device error log\n", DEVNAME(sc)); 8804 printf("%s: GOS-3758: 1\n", __func__); 8805 base = sc->sc_uc.uc_lmac_error_event_table[0]; 8806 printf("%s: GOS-3758: 2\n", __func__); 8807 if (base < 0x400000) { 8808 printf("%s: Invalid error log pointer 0x%08x\n", 8809 DEVNAME(sc), base); 8810 return; 8811 } 8812 8813 printf("%s: GOS-3758: 3\n", __func__); 8814 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 8815 printf("%s: reading errlog failed\n", DEVNAME(sc)); 8816 return; 8817 } 8818 8819 printf("%s: GOS-3758: 4\n", __func__); 8820 if (!table.valid) { 8821 printf("%s: errlog not found, skipping\n", DEVNAME(sc)); 8822 return; 8823 } 8824 8825 printf("%s: GOS-3758: 5\n", __func__); 8826 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 8827 printf("%s: Start Error Log Dump:\n", DEVNAME(sc)); 8828 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc), 8829 sc->sc_flags, table.valid); 8830 } 8831 8832 printf("%s: GOS-3758: 6\n", __func__); 8833 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id, 8834 iwx_desc_lookup(table.error_id)); 8835 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc), 8836 table.trm_hw_status0); 8837 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc), 8838 table.trm_hw_status1); 8839 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2); 8840 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1); 8841 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2); 8842 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1); 8843 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2); 8844 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3); 8845 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time); 8846 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low); 8847 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi); 8848 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1); 8849 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2); 8850 printf("%s: %08X | uCode revision type\n", DEVNAME(sc), 8851 table.fw_rev_type); 8852 printf("%s: %08X | uCode version major\n", DEVNAME(sc), 8853 table.major); 8854 printf("%s: %08X | uCode version minor\n", DEVNAME(sc), 8855 table.minor); 8856 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver); 8857 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver); 8858 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd); 8859 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0); 8860 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1); 8861 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2); 
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway).  Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 */
static void
iwx_nic_error(struct iwx_softc *sc)
{
	struct iwx_error_event_table table;
	uint32_t base;

	printf("%s: dumping device error log\n", DEVNAME(sc));
	printf("%s: GOS-3758: 1\n", __func__);
	base = sc->sc_uc.uc_lmac_error_event_table[0];
	printf("%s: GOS-3758: 2\n", __func__);
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	printf("%s: GOS-3758: 3\n", __func__);
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	printf("%s: GOS-3758: 4\n", __func__);
	if (!table.valid) {
		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
		return;
	}

	printf("%s: GOS-3758: 5\n", __func__);
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: GOS-3758: 6\n", __func__);
	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
	    table.trm_hw_status0);
	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
	    table.trm_hw_status1);
	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
	    table.fw_rev_type);
	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
	    table.major);
	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
	    table.minor);
	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc),
	    table.l2p_addr_match);
	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);

	if (sc->sc_uc.uc_umac_error_event_table)
		iwx_nic_umac_error(sc);
}

static void
iwx_dump_driver_status(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	enum ieee80211_state state = vap->iv_state;
	int i;

	printf("driver status:\n");
	for (i = 0; i < nitems(sc->txq); i++) {
		struct iwx_tx_ring *ring = &sc->txq[i];
		printf("  tx ring %2d: qid=%-2d cur=%-3d "
		    "cur_hw=%-3d queued=%-3d\n",
		    i, ring->qid, ring->cur, ring->cur_hw,
		    ring->queued);
	}
	printf("  rx ring: cur=%d\n", sc->rxq.cur);
	printf("  802.11 state %s\n", ieee80211_state_name[state]);
}

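/*
 * SYNC_RESP_STRUCT() syncs the RX buffer's DMA map for CPU access
 * (BUS_DMASYNC_POSTREAD) and points _var_ at the payload immediately
 * following the packet header, i.e. at (_pkt_) + 1.
 */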
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

static int
iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
{
	int qid, idx, code;

	qid = pkt->hdr.qid & ~0x80;
	idx = pkt->hdr.idx;
	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

	return (!(qid == 0 && idx == 0 && code == 0) &&
	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
}

static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	m0 = data->m;
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;
		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		if (!iwx_rx_pkt_valid(pkt))
			break;

		/*
		 * XXX Intel inside (tm)
		 * Any commands in the LONG_GROUP could actually be in the
		 * LEGACY group.  Firmware API versions >= 50 reject commands
		 * in group 0, forcing us to use this hack.
		 */
		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
			struct iwx_tx_ring *ring = &sc->txq[qid];
			struct iwx_tx_data *txdata = &ring->data[idx];
			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
				code = iwx_cmd_opcode(code);
		}

		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
			break;

		// TODO ???
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				break;
			}
			KASSERT((data->m != m0), ("%s: data->m != m0",
			    __func__));
		}

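		/*
		 * Dispatch on the wide command ID (group and opcode).  The
		 * block of cases near the end of the switch covers
		 * synchronous command responses, which are copied out to
		 * sc_cmd_resp_pkt[] for the sleeping issuer.
		 */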
		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			/* XXX-THJ: I've not managed to hit this path in
			 * testing. */
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			/* AX210 devices ship only one packet per Rx buffer. */
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
			    nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
				if (m == NULL) {
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
			}
			break;
		}

//		case IWX_BAR_FRAME_RELEASE:
//			iwx_rx_bar_frame_release(sc, pkt, ml);
//			break;
//
		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_BA_NOTIF:
			iwx_rx_compressed_ba(sc, pkt);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
			    "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
			    __func__);
			iwx_rx_bmiss(sc, pkt, data);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;
			struct iwx_alive_resp_v5 *resp5;
			struct iwx_alive_resp_v6 *resp6;

			DPRINTF(("%s: firmware alive\n", __func__));
			sc->sc_uc.uc_ok = 0;

			/*
			 * For v5 and above, we can check the version; for
			 * older versions we need to check the size.
			 */
			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 6) {
				SYNC_RESP_STRUCT(resp6, pkt);
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp6)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp6->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp6->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp6->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp6->sku_id.data[2]);
				if (resp6->status == IWX_ALIVE_STATUS_OK) {
					sc->sc_uc.uc_ok = 1;
				}
			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 5) {
				SYNC_RESP_STRUCT(resp5, pkt);
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp5)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp5->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp5->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp5->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp5->sku_id.data[2]);
				if (resp5->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else if (iwx_rx_packet_payload_len(pkt) ==
			    sizeof(*resp4)) {
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else
				printf("unknown payload version\n");

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_CMD):
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			struct iwx_umac_scan_complete *notif
			    __attribute__((unused));
			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n",
			    __func__));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: scan complete notif->status=%d\n",
			    __func__, notif->status));
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwx_umac_scan_iter_complete_notif *notif
			    __attribute__((unused));
			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
			    __func__));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: iter scan complete notif->status=%d\n",
			    __func__, notif->status));
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
			    DEVNAME(sc), le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}
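
		/*
		 * Newer firmware replaces explicit time events with "session
		 * protection"; the notification below signals the end of the
		 * protection window that was scheduled while associating.
		 */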
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_NOTIF): {
			struct iwx_session_prot_notif *notif;
			uint32_t status, start, conf_id;

			SYNC_RESP_STRUCT(notif, pkt);

			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	if (m0 && m0 != data->m)
		m_freem(m0);
}

static void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf m;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
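	/*
	 * For example, if the last processed index is 37, the value written
	 * below is 36 & ~7 == 32; rounding the write pointer down to a
	 * multiple of 8 appears to be required by the hardware.
	 */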
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}

#if 0
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * Ok, there was something; keep plowing until we have it all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur + 1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* This is where the fun begins. Don't ask. */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;
	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS,
			    IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT,
			    IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
		    IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just a periodic interrupt),
		 * to catch any dangling Rx interrupt. If it was just the
		 * periodic interrupt, there was no dangling Rx activity,
		 * and no need to extend the periodic interrupt; one-shot
		 * is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

out_ena:
	iwx_restore_interrupts(sc);
out:
	return rv;
}
#endif

static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}
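
	/*
	 * A single MSI-X vector is allocated in iwx_attach(), so both FH
	 * (DMA) and HW causes are serviced here and "vector" is always 0
	 * when the interrupt is re-armed via the automask register below.
	 */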
	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is
		 * hacked out in the fc release; return to it if we ever
		 * see this warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent a
	 * nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it
	 * should be re-enabled by clearing this bit. This register is
	 * defined as write-1-clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
out:
	IWX_UNLOCK(sc);
	return;
}

/*
 * The device info table below contains device-specific config overrides.
 * The most important parameter derived from this table is the name of the
 * firmware image to load.
 *
 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
 * The "old" table matches devices based on PCI vendor/product IDs only.
 * The "new" table extends this with various device parameters derived
 * from MAC type and RF type.
 *
 * In iwlwifi, "old" and "new" tables share the same array, where "old"
 * entries contain dummy values for data defined only for "new" entries.
 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled in
 * some work-in-progress state for quite a while. Linux commits moving
 * entries from "old" to "new" have at times been reverted due to
 * regressions. Part of this complexity comes from iwlwifi supporting both
 * iwm(4) and iwx(4) devices in the same driver.
 *
 * Our table below contains mostly "new" entries declared in iwlwifi
 * with the _IWL_DEV_INFO() macro (with a leading underscore).
 * Other devices are matched based on PCI vendor/product ID as usual,
 * unless matching specific PCI subsystem vendor/product IDs is required.
 *
 * Some "old"-style entries are required to identify the firmware image
 * to use. Others might be used to print a specific marketing name into
 * Linux dmesg, but we can't be sure whether the corresponding devices
 * would be matched correctly in the absence of their entries. So we
 * include them just in case.
 */
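
/*
 * Example: a discrete adapter with PCI product 0x2725 and subsystem
 * device 0x0090 is matched by the IWX_DEV_INFO(0x2725, 0x0090, ...)
 * entry below and thus uses the iwx_2ax_cfg_so_gf_a0 config. Because
 * iwx_find_device_cfg() walks the table from the bottom up, a later
 * "_IWX_DEV_INFO" entry whose MAC/RF constraints also match the same
 * device would take precedence over such a plain ID match.
 */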

struct iwx_dev_info {
	uint16_t device;
	uint16_t subdevice;
	uint16_t mac_type;
	uint16_t rf_type;
	uint8_t mac_step;
	uint8_t rf_id;
	uint8_t no_160;
	uint8_t cores;
	uint8_t cdb;
	uint8_t jacket;
	const struct iwx_device_cfg *cfg;
};

#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)

/*
 * When adding entries to this table keep in mind that entries must
 * be listed in the same order as in the Linux driver. Code walks this
 * table backwards and uses the first matching entry it finds.
 * Device firmware must be available in fw_update(8).
 */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY,
	    iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
	    IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
	    iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
	    IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
	    IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
	    IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
	    IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};

static int
iwx_preinit(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (sc->attached) {
		return 0;
	}

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	err = iwx_run_init_mvm_ucode(sc, 1);
	iwx_stop_device(sc);
	if (err) {
		printf("%s: failed to run init firmware\n", DEVNAME(sc));
		return err;
	}

	/* Print version info and MAC address on first successful fw load. */
	sc->attached = 1;
	if (sc->sc_pnvm_ver) {
		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, address %s\n",
		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
		    sc->sc_fwver, sc->sc_pnvm_ver,
		    ether_sprintf(sc->sc_nvm.hw_addr));
	} else {
		printf("%s: hw rev 0x%x, fw %s, address %s\n",
		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
	}

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	return 0;
}

static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}

const struct iwx_device_cfg *
iwx_find_device_cfg(struct iwx_softc *sc)
{
	uint16_t sdev_id, mac_type, rf_type;
	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
	int i;

	sdev_id = pci_get_subdevice(sc->sc_dev);
	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);

	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
	cores = IWX_SUBDEVICE_CORES(sdev_id);

	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];

		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
		    dev_info->device != sc->sc_pid)
			continue;

		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
		    dev_info->subdevice != sdev_id)
			continue;

		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
		    dev_info->mac_type != mac_type)
			continue;

		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
		    dev_info->mac_step != mac_step)
			continue;

		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
		    dev_info->rf_type != rf_type)
			continue;

		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
		    dev_info->cdb != cdb)
			continue;

		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
		    dev_info->jacket != jacket)
			continue;

		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
		    dev_info->rf_id != rf_id)
			continue;

		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
		    dev_info->no_160 != no_160)
			continue;

		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
		    dev_info->cores != cores)
			continue;

		return dev_info->cfg;
	}

	return NULL;
}
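
/*
 * Note that iwx_find_device_cfg() returns NULL when nothing matches;
 * in that case iwx_attach() simply keeps the defaults it derives from
 * the PCI product ID, so a missing table entry is not fatal.
 */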

static int
iwx_probe(device_t dev)
{
	int i;

	for (i = 0; i < nitems(iwx_devices); i++) {
		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
		    pci_get_device(dev) == iwx_devices[i].device) {
			device_set_desc(dev, iwx_devices[i].name);

			/*
			 * Due to significant existing deployments using
			 * iwlwifi, lower the priority of iwx.
			 *
			 * This inverts the advice in bus.h, where drivers
			 * supporting newer hardware should return
			 * BUS_PROBE_DEFAULT and drivers for older devices
			 * should return BUS_PROBE_LOW_PRIORITY.
			 */
			return (BUS_PROBE_LOW_PRIORITY);
		}
	}

	return (ENXIO);
}

static int
iwx_attach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwx_device_cfg *cfg;
	int err;
	int txq_i, i, j;
	size_t ctxt_info_size;
	int rid;
	int count;
	int error;

	sc->sc_dev = dev;
	sc->sc_pid = pci_get_device(dev);
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
	IWX_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		return (ENXIO);
	}

	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (sc->sc_cap_off == 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (ENXIO);
	}

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	if (pci_msix_count(dev)) {
		sc->sc_msix = 1;
	} else {
		device_printf(dev, "no MSI-X found\n");
		return (ENXIO);
	}

	pci_enable_busmaster(dev);
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	count = 1;
	rid = 0;
	if (pci_alloc_msix(dev, &count) == 0)
		rid = 1;
	DPRINTF(("%s: count=%d\n", __func__, count));
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
	}

	/* Clear pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);

	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
	DPRINTF(("%s: sc->sc_hw_rf_id=%d\n", __func__, sc->sc_hw_rf_id));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and now the revision step also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we store it in
	 * the old format.
	 */
	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
	    (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	switch (sc->sc_pid) {
	case PCI_PRODUCT_INTEL_WL_22500_1:
		sc->sc_fwname = IWX_CC_A_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_2:
	case PCI_PRODUCT_INTEL_WL_22500_5:
		/* These devices should be QuZ only. */
		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
			device_printf(dev, "unsupported AX201 adapter\n");
			return (ENXIO);
		}
		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_3:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_4:
	case PCI_PRODUCT_INTEL_WL_22500_7:
	case PCI_PRODUCT_INTEL_WL_22500_8:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 1820;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_6:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_9:
	case PCI_PRODUCT_INTEL_WL_22500_10:
	case PCI_PRODUCT_INTEL_WL_22500_11:
	case PCI_PRODUCT_INTEL_WL_22500_13:
	/* _14 is an MA device, not yet supported */
	case PCI_PRODUCT_INTEL_WL_22500_15:
	case PCI_PRODUCT_INTEL_WL_22500_16:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_12:
	case PCI_PRODUCT_INTEL_WL_22500_17:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		sc->sc_imr_enabled = 1;
		break;
	default:
		device_printf(dev, "unknown adapter type\n");
		return (ENXIO);
	}

	cfg = iwx_find_device_cfg(sc);
	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
	if (cfg) {
		sc->sc_fwname = cfg->fw_name;
		sc->sc_pnvm_name = cfg->pnvm_name;
		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
		sc->sc_uhb_supported = cfg->uhb_supported;
		if (cfg->xtal_latency) {
			sc->sc_xtal_latency = cfg->xtal_latency;
			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
		}
	}

	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		sc->sc_umac_prph_offset = 0x300000;
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
	} else
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;

	/* Allocate DMA memory for loading firmware. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
	else
		ctxt_info_size = sizeof(struct iwx_context_info);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    ctxt_info_size, 1);
	if (err) {
		device_printf(dev,
		    "could not allocate memory for loading firmware\n");
		return (ENXIO);
	}

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
		    sizeof(struct iwx_prph_scratch), 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph scratch memory\n");
			goto fail1;
		}

		/*
		 * Allocate prph information. The driver doesn't use this.
		 * We use the second half of this page to give the device
		 * some dummy TR/CR tail pointers; these shouldn't be
		 * necessary since we don't use the prph info ourselves,
		 * but the hardware still reads/writes there and we can't
		 * let it do that through a NULL pointer.
		 */
		KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
		    ("iwx_prph_info has wrong size"));
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
		    PAGE_SIZE, 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph info memory\n");
			goto fail1;
		}
	}

	/* Allocate interrupt cause table (ICT). */
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWX_ICT_SIZE, 1 << IWX_ICT_PADDR_SHIFT);
	if (err) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail1;
	}

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			device_printf(dev, "could not allocate TX ring %d\n",
			    txq_i);
			goto fail4;
		}
	}

	err = iwx_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		device_printf(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}
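
	/*
	 * Debug knobs: when built with IWX_DEBUG, the nodes below hang off
	 * the device's sysctl tree, e.g. dev.iwx.0.debug (debug bitmask)
	 * and dev.iwx.0.queue0 (TX queue depth) for a first instance; the
	 * exact prefix follows the device name assigned at probe time.
	 */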

#ifdef IWX_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
	    CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
	    CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
	    CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
	    CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
	    CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
	    CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
	    CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
	    CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
	    CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
	    CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
	    CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
#endif
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_MONITOR |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_BGSCAN;		/* capable of bg scanning */

	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	/* Enable seqno offload */
	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
	/* Don't send null data frames; let firmware do it */
	ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;

	ic->ic_txstream = 2;
	ic->ic_rxstream = 2;
	ic->ic_htcaps |= IEEE80211_HTC_HT
	    | IEEE80211_HTCAP_SMPS_OFF
	    | IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
	    | IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
	    | IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
	    | IEEE80211_HTC_AMPDU	/* tx A-MPDU */
//	    | IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
	    | IEEE80211_HTCAP_MAXAMSDU_3839;	/* max A-MSDU length */

	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;

	/*
	 * XXX: setupcurchan() expects vhtcaps to be non-zero
	 * https://bugs.freebsd.org/274156
	 */
	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
	    | IEEE80211_VHTCAP_SHORT_GI_80
	    | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
	    | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
	    | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;

	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
	    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);

	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		for (j = 0; j < nitems(rxba->entries); j++)
			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
	}

	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail4;
	}

	return (0);

fail4:
	while (--txq_i >= 0)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);
	if (sc->ict_dma.vaddr != NULL)
		iwx_dma_contig_free(&sc->ict_dma);

fail1:
	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_dma_contig_free(&sc->prph_scratch_dma);
	iwx_dma_contig_free(&sc->prph_info_dma);
	return (ENXIO);
}

static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	iwx_stop_device(sc);

	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}

static void
iwx_radiotap_attach(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
	    "->%s begin\n", __func__);

	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWX_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWX_RX_RADIOTAP_PRESENT);

	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
	    "->%s end\n", __func__);
}

struct ieee80211vap *
iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwx_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))	/* only one at a time */
		return NULL;
	ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;	/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwx_newstate;

	ivp->id = IWX_DEFAULT_MACID;
	ivp->color = IWX_DEFAULT_COLOR;

	ivp->have_wme = TRUE;
	ivp->ps_disabled = FALSE;

	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* h/w crypto support */
	vap->iv_key_alloc = iwx_key_alloc;
	vap->iv_key_delete = iwx_key_delete;
	vap->iv_key_set = iwx_key_set;
	vap->iv_key_update_begin = iwx_key_update_begin;
	vap->iv_key_update_end = iwx_key_update_end;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);
	ic->ic_opmode = opmode;

	return vap;
}

static void
iwx_vap_delete(struct ieee80211vap *vap)
{
	struct iwx_vap *ivp = IWX_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

static void
iwx_parent(struct ieee80211com *ic)
{
	struct iwx_softc *sc = ic->ic_softc;

	IWX_LOCK(sc);
	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
		iwx_stop(sc);
		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
	} else {
		iwx_init(sc);
		ieee80211_start_all(ic);
	}
	IWX_UNLOCK(sc);
}

static int
iwx_suspend(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;

	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
		ieee80211_suspend_all(ic);

		iwx_stop(sc);
		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
	}
	return (0);
}

static int
iwx_resume(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	IWX_LOCK(sc);

	err = iwx_init(sc);
	if (err) {
		iwx_stop_device(sc);
		IWX_UNLOCK(sc);
		return err;
	}

	IWX_UNLOCK(sc);

	ieee80211_resume_all(ic);
	return (0);
}

static void
iwx_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_softc *sc = ic->ic_softc;
	int err;

	IWX_LOCK(sc);
	if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
		err = iwx_scan(sc);
	else
		err = iwx_bgscan(ic);
	IWX_UNLOCK(sc);
	if (err)
		ieee80211_cancel_scan(vap);
}

static void
iwx_update_mcast(struct ieee80211com *ic)
{
}

static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
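
/*
 * Scanning is offloaded to the firmware (IEEE80211_FEXT_SCAN_OFFLOAD is
 * set in iwx_attach()), so net80211's per-channel scan hooks above are
 * intentionally empty and iwx_set_channel() below is stubbed; the
 * firmware signals completion via IWX_SCAN_COMPLETE_UMAC, which ends up
 * in iwx_endscan().
 */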
	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_tx(sc, m, ni);
		IWX_UNLOCK(sc);
		return (err);
	} else {
		IWX_UNLOCK(sc);
		return (EIO);
	}
}

static int
iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct iwx_softc *sc = ic->ic_softc;
	int error;

	/* TODO: mbufq_enqueue in iwm */
	/* TODO: dequeue in iwm_start, counters, locking */
	IWX_LOCK(sc);
	error = mbufq_enqueue(&sc->sc_snd, m);
	if (error) {
		IWX_UNLOCK(sc);
		return (error);
	}

	iwx_start(sc);
	IWX_UNLOCK(sc);
	return (0);
}

static int
iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
    int baparamset, int batimeout, int baseqctl)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwx_softc *sc = ic->ic_softc;
	int tid;

	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);

	/*
	 * The peer controls the TID value, so validate it before it is
	 * used to index ni_rx_ba[].
	 */
	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
	    tid >= IWX_MAX_TID_COUNT)
		return (ENOSPC);

	if (sc->ba_rx.start_tidmask & (1 << tid)) {
		DPRINTF(("%s: tid %d already added\n", __func__, tid));
		return (EBUSY);
	}
	DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__,
	    sc->ba_rx.start_tidmask));

	sc->ni_rx_ba[tid].ba_winstart =
	    _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
	sc->ni_rx_ba[tid].ba_winsize =
	    _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
	sc->ni_rx_ba[tid].ba_timeout_val = batimeout;

	sc->ba_rx.start_tidmask |= (1 << tid);
	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: ba_winstart=%i\n", __func__,
	    sc->ni_rx_ba[tid].ba_winstart));
	DPRINTF(("%s: ba_winsize=%i\n", __func__,
	    sc->ni_rx_ba[tid].ba_winsize));
	DPRINTF(("%s: ba_timeout_val=%i\n", __func__,
	    sc->ni_rx_ba[tid].ba_timeout_val));

	taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);

	/* TODO (misha): move to ba_task (serialize). */
	sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);

	return (0);
}

static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
}

static int
iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct iwx_softc *sc = ni->ni_ic->ic_softc;
	int tid;

	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
	DPRINTF(("%s: tid=%i\n", __func__, tid));
	sc->ba_tx.start_tidmask |= (1 << tid);
	taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);

	return (0);
}

static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return (0);
}

static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
}

static void
iwx_key_update_end(struct ieee80211vap *vap)
{
}

static int
iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{

	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM)
		return (1);

	if (!(&vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
10947 /* 10948 * Not in the global key table, the driver should handle this 10949 * by allocating a slot in the h/w key table/cache. In 10950 * lieu of that return key slot 0 for any unicast key 10951 * request. We disallow the request if this is a group key. 10952 * This default policy does the right thing for legacy hardware 10953 * with a 4 key table. It also handles devices that pass 10954 * packets through untouched when marked with the WEP bit 10955 * and key index 0. 10956 */ 10957 if (k->wk_flags & IEEE80211_KEY_GROUP) 10958 return 0; 10959 *keyix = 0; /* NB: use key index 0 for ucast key */ 10960 } else { 10961 *keyix = ieee80211_crypto_get_key_wepidx(vap, k); 10962 } 10963 *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */ 10964 return 1; 10965 } 10966 10967 static int 10968 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 10969 { 10970 struct ieee80211com *ic = vap->iv_ic; 10971 struct iwx_softc *sc = ic->ic_softc; 10972 struct iwx_add_sta_key_cmd cmd; 10973 uint32_t status; 10974 int err; 10975 int id; 10976 10977 if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) { 10978 return 1; 10979 } 10980 10981 IWX_LOCK(sc); 10982 /* 10983 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid. 10984 * Currently we only implement station mode where 'ni' is always 10985 * ic->ic_bss so there is no need to validate arguments beyond this: 10986 */ 10987 10988 memset(&cmd, 0, sizeof(cmd)); 10989 10990 if (k->wk_flags & IEEE80211_KEY_GROUP) { 10991 DPRINTF(("%s: adding group key\n", __func__)); 10992 } else { 10993 DPRINTF(("%s: adding key\n", __func__)); 10994 } 10995 if (k >= &vap->iv_nw_keys[0] && 10996 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) 10997 id = (k - vap->iv_nw_keys); 10998 else 10999 id = (0); 11000 DPRINTF(("%s: setting keyid=%i\n", __func__, id)); 11001 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM | 11002 IWX_STA_KEY_FLG_WEP_KEY_MAP | 11003 ((id << IWX_STA_KEY_FLG_KEYID_POS) & 11004 IWX_STA_KEY_FLG_KEYID_MSK)); 11005 if (k->wk_flags & IEEE80211_KEY_GROUP) { 11006 cmd.common.key_offset = 1; 11007 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST); 11008 } else { 11009 cmd.common.key_offset = 0; 11010 } 11011 memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key), 11012 k->wk_keylen)); 11013 DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen)); 11014 for (int i=0; i<k->wk_keylen; i++) { 11015 DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i])); 11016 } 11017 cmd.common.sta_id = IWX_STATION_ID; 11018 11019 cmd.transmit_seq_cnt = htole64(k->wk_keytsc); 11020 DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc)); 11021 11022 status = IWX_ADD_STA_SUCCESS; 11023 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd, 11024 &status); 11025 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) 11026 err = EIO; 11027 if (err) { 11028 printf("%s: can't set wpa2 keys (error %d)\n", __func__, err); 11029 IWX_UNLOCK(sc); 11030 return err; 11031 } else 11032 DPRINTF(("%s: key added successfully\n", __func__)); 11033 IWX_UNLOCK(sc); 11034 return 1; 11035 } 11036 11037 static int 11038 iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 11039 { 11040 return 1; 11041 } 11042 11043 static device_method_t iwx_pci_methods[] = { 11044 /* Device interface */ 11045 DEVMETHOD(device_probe, iwx_probe), 11046 DEVMETHOD(device_attach, iwx_attach), 11047 DEVMETHOD(device_detach, iwx_detach), 11048 DEVMETHOD(device_suspend, iwx_suspend), 11049 DEVMETHOD(device_resume, iwx_resume), 11050 
	DEVMETHOD_END
};

static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof(struct iwx_softc)
};

DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);