1 /*-
2 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3 */
4
5 /* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
6
7 /*
8 *
9 * Copyright (c) 2025 The FreeBSD Foundation
10 *
11 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12 * under sponsorship from the FreeBSD Foundation.
13 *
14 * Permission to use, copy, modify, and distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
26 */
27
28 /*-
29 * Copyright (c) 2024 Future Crew, LLC
30 * Author: Mikhail Pchelin <misha@FreeBSD.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45 /*
46 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47 * Author: Stefan Sperling <stsp@openbsd.org>
48 * Copyright (c) 2014 Fixup Software Ltd.
49 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50 *
51 * Permission to use, copy, modify, and distribute this software for any
52 * purpose with or without fee is hereby granted, provided that the above
53 * copyright notice and this permission notice appear in all copies.
54 *
55 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62 */
63
64 /*-
65 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66 * which were used as the reference documentation for this implementation.
67 *
68 ******************************************************************************
69 *
70 * This file is provided under a dual BSD/GPLv2 license. When using or
71 * redistributing this file, you may do so under either license.
72 *
73 * GPL LICENSE SUMMARY
74 *
75 * Copyright(c) 2017 Intel Deutschland GmbH
76 * Copyright(c) 2018 - 2019 Intel Corporation
77 *
78 * This program is free software; you can redistribute it and/or modify
79 * it under the terms of version 2 of the GNU General Public License as
80 * published by the Free Software Foundation.
81 *
82 * This program is distributed in the hope that it will be useful, but
83 * WITHOUT ANY WARRANTY; without even the implied warranty of
84 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
85 * General Public License for more details.
86 *
87 * BSD LICENSE
88 *
89 * Copyright(c) 2017 Intel Deutschland GmbH
90 * Copyright(c) 2018 - 2019 Intel Corporation
91 * All rights reserved.
92 *
93 * Redistribution and use in source and binary forms, with or without
94 * modification, are permitted provided that the following conditions
95 * are met:
96 *
97 * * Redistributions of source code must retain the above copyright
98 * notice, this list of conditions and the following disclaimer.
99 * * Redistributions in binary form must reproduce the above copyright
100 * notice, this list of conditions and the following disclaimer in
101 * the documentation and/or other materials provided with the
102 * distribution.
103 * * Neither the name Intel Corporation nor the names of its
104 * contributors may be used to endorse or promote products derived
105 * from this software without specific prior written permission.
106 *
107 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118 *
119 *****************************************************************************
120 */
121
122 /*-
123 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124 *
125 * Permission to use, copy, modify, and distribute this software for any
126 * purpose with or without fee is hereby granted, provided that the above
127 * copyright notice and this permission notice appear in all copies.
128 *
129 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136 */
137
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180
181 int iwx_himark = 224;
182 int iwx_lomark = 192;
183
184 #define IWX_FBSD_RSP_V3 3
185 #define IWX_FBSD_RSP_V4 4
186
187 #define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
188 #define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)
189
190 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
191 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195
196 #include <dev/iwx/if_iwx_debug.h>
197
198 #define PCI_CFG_RETRY_TIMEOUT 0x41
199
200 #define PCI_VENDOR_INTEL 0x8086
201 #define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
202 #define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
203 #define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
204 #define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
205 #define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
206 #define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
207 #define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
208 #define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
209 #define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
210 #define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
211 #define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
212 #define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
213 #define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
214 #define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
215 #define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
216 #define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
217 #define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
218
219 static const struct iwx_devices {
220 uint16_t device;
221 char *name;
222 } iwx_devices[] = {
223 { PCI_PRODUCT_INTEL_WL_22500_1, "Wi-Fi 6 AX200" },
224 { PCI_PRODUCT_INTEL_WL_22500_2, "Wi-Fi 6 AX201" },
225 { PCI_PRODUCT_INTEL_WL_22500_3, "Wi-Fi 6 AX201" },
226 { PCI_PRODUCT_INTEL_WL_22500_4, "Wi-Fi 6 AX201" },
227 { PCI_PRODUCT_INTEL_WL_22500_5, "Wi-Fi 6 AX201" },
228 { PCI_PRODUCT_INTEL_WL_22500_6, "Wi-Fi 6 AX201" },
229 { PCI_PRODUCT_INTEL_WL_22500_7, "Wi-Fi 6 AX201" },
230 { PCI_PRODUCT_INTEL_WL_22500_8, "Wi-Fi 6 AX201" },
231 { PCI_PRODUCT_INTEL_WL_22500_9, "Wi-Fi 6 AX210" },
232 { PCI_PRODUCT_INTEL_WL_22500_10, "Wi-Fi 6 AX211" },
233 { PCI_PRODUCT_INTEL_WL_22500_11, "Wi-Fi 6 AX211" },
234 { PCI_PRODUCT_INTEL_WL_22500_12, "Wi-Fi 6 AX211" },
235 { PCI_PRODUCT_INTEL_WL_22500_13, "Wi-Fi 6 AX211" },
236 { PCI_PRODUCT_INTEL_WL_22500_14, "Wi-Fi 6 AX210" },
237 { PCI_PRODUCT_INTEL_WL_22500_15, "Wi-Fi 6 AX211" },
238 { PCI_PRODUCT_INTEL_WL_22500_16, "Wi-Fi 6 AX211" },
239 { PCI_PRODUCT_INTEL_WL_22500_17, "Wi-Fi 6 AX211" },
240 };
241
/*
 * Channel numbers advertised for non-UHB parts: 2.4 GHz channels 1-14
 * followed by the 5 GHz channel set.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
250
/*
 * Channel numbers for UHB (ultra-high-band) parts: the same 2.4/5 GHz
 * set as above plus the 6-7 GHz channels.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
264
265 #define IWX_NUM_2GHZ_CHANNELS 14
266 #define IWX_NUM_5GHZ_CHANNELS 37
267
/*
 * Rate table indexed by IWX_RIDX_* below.  "rate" is in units of
 * 500 kb/s (so 2 == 1 Mb/s, matching IWX_RATE_1M_PLCP); "plcp" is the
 * legacy CCK/OFDM PLCP value and "ht_plcp" the HT MCS PLCP value.
 * The *_INV/INVM constants presumably mark entries with no legacy or
 * no SISO-HT equivalent -- note some MIMO2 rows reuse legacy indices.
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kb/s units */
	uint8_t plcp;		/* legacy PLCP value */
	uint8_t ht_plcp;	/* HT PLCP value */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
296 #define IWX_RIDX_CCK 0
297 #define IWX_RIDX_OFDM 4
298 #define IWX_RIDX_MAX (nitems(iwx_rates)-1)
299 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
300 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
301 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
302
/*
 * Convert an MCS index into an iwx_rates[] index.  Entries 0-7 are
 * SISO MCS 0-7, entries 8-15 are MIMO2 MCS 8-15.
 */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
322
323 static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
324 static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
325 static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
326 #if 0
327 static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
328 static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
329 #endif
330 static int iwx_apply_debug_destination(struct iwx_softc *);
331 static void iwx_set_ltr(struct iwx_softc *);
332 static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
333 static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
334 const struct iwx_fw_sects *);
335 static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
336 static void iwx_ctxt_info_free_paging(struct iwx_softc *);
337 static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
338 struct iwx_context_info_dram *);
339 static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
340 static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
341 const uint8_t *, size_t);
342 static int iwx_set_default_calib(struct iwx_softc *, const void *);
343 static void iwx_fw_info_free(struct iwx_fw_info *);
344 static int iwx_read_firmware(struct iwx_softc *);
345 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
346 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
347 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
348 static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
349 static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
351 static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
352 static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
353 static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
354 static int iwx_nic_lock(struct iwx_softc *);
355 static void iwx_nic_assert_locked(struct iwx_softc *);
356 static void iwx_nic_unlock(struct iwx_softc *);
357 static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
358 uint32_t);
359 static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
360 static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
361 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
362 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
363 bus_size_t, bus_size_t);
364 static void iwx_dma_contig_free(struct iwx_dma_info *);
365 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void iwx_disable_rx_dma(struct iwx_softc *);
367 static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
368 static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
369 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
370 static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
371 static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
372 static void iwx_enable_rfkill_int(struct iwx_softc *);
373 static int iwx_check_rfkill(struct iwx_softc *);
374 static void iwx_enable_interrupts(struct iwx_softc *);
375 static void iwx_enable_fwload_interrupt(struct iwx_softc *);
376 #if 0
377 static void iwx_restore_interrupts(struct iwx_softc *);
378 #endif
379 static void iwx_disable_interrupts(struct iwx_softc *);
380 static void iwx_ict_reset(struct iwx_softc *);
381 static int iwx_set_hw_ready(struct iwx_softc *);
382 static int iwx_prepare_card_hw(struct iwx_softc *);
383 static int iwx_force_power_gating(struct iwx_softc *);
384 static void iwx_apm_config(struct iwx_softc *);
385 static int iwx_apm_init(struct iwx_softc *);
386 static void iwx_apm_stop(struct iwx_softc *);
387 static int iwx_allow_mcast(struct iwx_softc *);
388 static void iwx_init_msix_hw(struct iwx_softc *);
389 static void iwx_conf_msix_hw(struct iwx_softc *, int);
390 static int iwx_clear_persistence_bit(struct iwx_softc *);
391 static int iwx_start_hw(struct iwx_softc *);
392 static void iwx_stop_device(struct iwx_softc *);
393 static void iwx_nic_config(struct iwx_softc *);
394 static int iwx_nic_rx_init(struct iwx_softc *);
395 static int iwx_nic_init(struct iwx_softc *);
396 static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
397 static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
398 static void iwx_post_alive(struct iwx_softc *);
399 static int iwx_schedule_session_protection(struct iwx_softc *,
400 struct iwx_node *, uint32_t);
401 static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
402 static void iwx_init_channel_map(struct ieee80211com *, int, int *,
403 struct ieee80211_channel[]);
404 static int iwx_mimo_enabled(struct iwx_softc *);
405 static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
406 uint16_t);
407 static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
408 static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
409 uint16_t, uint16_t, int, int);
410 static void iwx_sta_tx_agg_start(struct iwx_softc *,
411 struct ieee80211_node *, uint8_t);
412 static void iwx_ba_rx_task(void *, int);
413 static void iwx_ba_tx_task(void *, int);
414 static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
415 static int iwx_is_valid_mac_addr(const uint8_t *);
416 static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
417 static int iwx_nvm_get(struct iwx_softc *);
418 static int iwx_load_firmware(struct iwx_softc *);
419 static int iwx_start_fw(struct iwx_softc *);
420 static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
421 static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
422 static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
423 static int iwx_load_pnvm(struct iwx_softc *);
424 static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
425 static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
426 static int iwx_load_ucode_wait_alive(struct iwx_softc *);
427 static int iwx_send_dqa_cmd(struct iwx_softc *);
428 static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
429 static int iwx_config_ltr(struct iwx_softc *);
430 static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
431 static int iwx_rx_addbuf(struct iwx_softc *, int, int);
432 static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
433 static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
434 struct iwx_rx_data *);
435 static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
436 static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
437 #if 0
438 int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
439 struct ieee80211_node *, struct ieee80211_rxinfo *);
440 #endif
441 static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
442 int, int, uint32_t, uint8_t);
443 static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
444 static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
445 struct iwx_tx_data *);
446 static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
447 static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
448 struct iwx_rx_data *);
449 static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
450 static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
451 struct iwx_rx_data *);
452 static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
453 static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
454 static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
455 struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
456 #if 0
457 static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
458 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
459 #endif
460 static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
461 uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
462 static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
463 static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
464 const void *);
465 static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
466 uint32_t *);
467 static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
468 const void *, uint32_t *);
469 static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
470 static void iwx_cmd_done(struct iwx_softc *, int, int, int);
471 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
472 static uint32_t iwx_fw_rateidx_cck(uint8_t);
473 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
474 struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
475 struct mbuf *);
476 static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
477 uint16_t, uint16_t);
478 static int iwx_tx(struct iwx_softc *, struct mbuf *,
479 struct ieee80211_node *);
480 static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
481 static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
482 static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
483 static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
484 struct iwx_beacon_filter_cmd *);
485 static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
486 int);
487 static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
488 struct iwx_mac_power_cmd *);
489 static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
490 static int iwx_power_update_device(struct iwx_softc *);
491 #if 0
492 static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
493 #endif
494 static int iwx_disable_beacon_filter(struct iwx_softc *);
495 static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
496 static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
497 static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
498 static int iwx_fill_probe_req(struct iwx_softc *,
499 struct iwx_scan_probe_req *);
500 static int iwx_config_umac_scan_reduced(struct iwx_softc *);
501 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
502 static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
503 struct iwx_scan_general_params_v10 *, int);
504 static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
505 struct iwx_scan_general_params_v10 *, uint16_t, int);
506 static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
507 struct iwx_scan_channel_params_v6 *, uint32_t, int);
508 static int iwx_umac_scan_v14(struct iwx_softc *, int);
509 static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
510 static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
511 static int iwx_rval2ridx(int);
512 static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
513 int *);
514 static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
515 struct iwx_mac_ctx_cmd *, uint32_t);
516 static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
517 struct iwx_mac_data_sta *, int);
518 static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
519 uint32_t, int);
520 static int iwx_clear_statistics(struct iwx_softc *);
521 static int iwx_scan(struct iwx_softc *);
522 static int iwx_bgscan(struct ieee80211com *);
523 static int iwx_enable_mgmt_queue(struct iwx_softc *);
524 static int iwx_disable_mgmt_queue(struct iwx_softc *);
525 static int iwx_rs_rval2idx(uint8_t);
526 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
527 int);
528 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
529 static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
530 static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
531 static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
532 static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
533 uint8_t, uint8_t);
534 static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
535 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
536 uint8_t);
537 static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
538 static int iwx_deauth(struct iwx_softc *);
539 static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
540 static int iwx_run_stop(struct iwx_softc *);
541 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
542 const uint8_t[IEEE80211_ADDR_LEN]);
543 #if 0
544 int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
545 struct ieee80211_key *);
546 void iwx_setkey_task(void *);
547 void iwx_delete_key(struct ieee80211com *,
548 struct ieee80211_node *, struct ieee80211_key *);
549 #endif
550 static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
551 static void iwx_endscan(struct iwx_softc *);
552 static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
553 struct ieee80211_node *);
554 static int iwx_sf_config(struct iwx_softc *, int);
555 static int iwx_send_bt_init_conf(struct iwx_softc *);
556 static int iwx_send_soc_conf(struct iwx_softc *);
557 static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
558 static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
559 static int iwx_init_hw(struct iwx_softc *);
560 static int iwx_init(struct iwx_softc *);
561 static void iwx_stop(struct iwx_softc *);
562 static void iwx_watchdog(void *);
563 static const char *iwx_desc_lookup(uint32_t);
564 static void iwx_nic_error(struct iwx_softc *);
565 static void iwx_dump_driver_status(struct iwx_softc *);
566 static void iwx_nic_umac_error(struct iwx_softc *);
567 static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
568 static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
569 static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
570 struct mbuf *);
571 static void iwx_notif_intr(struct iwx_softc *);
572 #if 0
573 /* XXX-THJ - I don't have hardware for this */
574 static int iwx_intr(void *);
575 #endif
576 static void iwx_intr_msix(void *);
577 static int iwx_preinit(struct iwx_softc *);
578 static void iwx_attach_hook(void *);
579 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
580 static int iwx_probe(device_t);
581 static int iwx_attach(device_t);
582 static int iwx_detach(device_t);
583
584 /* FreeBSD specific glue */
/*
 * Ethernet broadcast address (ff:ff:ff:ff:ff:ff), OpenBSD-compat glue.
 * NOTE(review): defined with external linkage; confirm this does not
 * collide with a same-named symbol elsewhere in the kernel.
 */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zeroes address -- presumably a wildcard/"any" match; verify use. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
590
591 #if IWX_DEBUG
592 #define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
593 #else
594 #define DPRINTF(x) do { ; } while (0)
595 #endif
596
597 /* FreeBSD specific functions */
598 static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
599 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
600 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
601 static void iwx_vap_delete(struct ieee80211vap *);
602 static void iwx_parent(struct ieee80211com *);
603 static void iwx_scan_start(struct ieee80211com *);
604 static void iwx_scan_end(struct ieee80211com *);
605 static void iwx_update_mcast(struct ieee80211com *ic);
606 static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
607 static void iwx_scan_mindwell(struct ieee80211_scan_state *);
608 static void iwx_set_channel(struct ieee80211com *);
609 static void iwx_endscan_cb(void *, int );
610 static int iwx_wme_update(struct ieee80211com *);
611 static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
612 const struct ieee80211_bpf_params *);
613 static int iwx_transmit(struct ieee80211com *, struct mbuf *);
614 static void iwx_start(struct iwx_softc *);
615 static int iwx_ampdu_rx_start(struct ieee80211_node *,
616 struct ieee80211_rx_ampdu *, int, int, int);
617 static void iwx_ampdu_rx_stop(struct ieee80211_node *,
618 struct ieee80211_rx_ampdu *);
619 static int iwx_addba_request(struct ieee80211_node *,
620 struct ieee80211_tx_ampdu *, int, int, int);
621 static int iwx_addba_response(struct ieee80211_node *,
622 struct ieee80211_tx_ampdu *, int, int, int);
623 static void iwx_key_update_begin(struct ieee80211vap *);
624 static void iwx_key_update_end(struct ieee80211vap *);
625 static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
626 ieee80211_keyix *,ieee80211_keyix *);
627 static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
628 static int iwx_key_delete(struct ieee80211vap *,
629 const struct ieee80211_key *);
630 static int iwx_suspend(device_t);
631 static int iwx_resume(device_t);
632 static void iwx_radiotap_attach(struct iwx_softc *);
633
634 /* OpenBSD compat defines */
635 #define IEEE80211_HTOP0_SCO_SCN 0
636 #define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
637 #define IEEE80211_VHTOP0_CHAN_WIDTH_80 1
638
639 #define IEEE80211_HT_RATESET_SISO 0
640 #define IEEE80211_HT_RATESET_MIMO2 2
641
/*
 * Standard 11a/11b/11g rate sets (OpenBSD-compat); rates are in
 * 500 kb/s units, e.g. 12 == 6 Mb/s, 2 == 1 Mb/s.
 */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
650
651 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)652 ieee80211_has_addr4(const struct ieee80211_frame *wh)
653 {
654 return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
655 IEEE80211_FC1_DIR_DSTODS;
656 }
657
658 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)659 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
660 {
661 const struct iwx_fw_cmd_version *entry;
662 int i;
663
664 for (i = 0; i < sc->n_cmd_versions; i++) {
665 entry = &sc->cmd_versions[i];
666 if (entry->group == grp && entry->cmd == cmd)
667 return entry->cmd_ver;
668 }
669
670 return IWX_FW_CMD_VER_UNKNOWN;
671 }
672
673 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)674 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
675 {
676 const struct iwx_fw_cmd_version *entry;
677 int i;
678
679 for (i = 0; i < sc->n_cmd_versions; i++) {
680 entry = &sc->cmd_versions[i];
681 if (entry->group == grp && entry->cmd == cmd)
682 return entry->notif_ver;
683 }
684
685 return IWX_FW_CMD_VER_UNKNOWN;
686 }
687
688 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)689 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
690 {
691 const struct iwx_fw_cscheme_list *l = (const void *)data;
692
693 if (dlen < sizeof(*l) ||
694 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
695 return EINVAL;
696
697 /* we don't actually store anything for now, always use s/w crypto */
698
699 return 0;
700 }
701
702 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)703 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
704 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
705 {
706 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
707 if (err) {
708 printf("%s: could not allocate context info DMA memory\n",
709 DEVNAME(sc));
710 return err;
711 }
712
713 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
714
715 return 0;
716 }
717
718 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)719 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
720 {
721 struct iwx_self_init_dram *dram = &sc->init_dram;
722 int i;
723
724 if (!dram->paging)
725 return;
726
727 /* free paging*/
728 for (i = 0; i < dram->paging_cnt; i++)
729 iwx_dma_contig_free(&dram->paging[i]);
730
731 free(dram->paging, M_DEVBUF);
732 dram->paging_cnt = 0;
733 dram->paging = NULL;
734 }
735
736 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)737 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
738 {
739 int i = 0;
740
741 while (start < fws->fw_count &&
742 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
743 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
744 start++;
745 i++;
746 }
747
748 return i;
749 }
750
/*
 * Copy all firmware sections (LMAC, UMAC and paging) into DMA memory
 * and record their physical addresses in the context-info DRAM table
 * that is later handed to the device.
 *
 * The three section groups are stored back-to-back in the firmware
 * image, delimited by separator entries; the "+ 1" / "+ 2" index
 * adjustments below skip over those separators.
 *
 * Returns 0 on success or an errno.  On failure the caller is expected
 * to release partially allocated memory (iwx_ctxt_info_free_fw_img()
 * and, for paging, iwx_ctxt_info_free_paging()).
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the softc lock across the allocations below. */
	IWX_UNLOCK(sc);
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	/*
	 * NOTE(review): this allocation sleeps (M_WAITOK) while the one
	 * above uses M_NOWAIT; M_WAITOK cannot return NULL, so the error
	 * check below is dead code -- confirm which behavior is intended.
	 */
	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
848
/*
 * Format a firmware version triple into buf.
 * Starting with major version 35 the Linux driver prints the minor
 * version in hexadecimal (zero-padded to 8 digits); mirror that here
 * so version strings match what users see elsewhere.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	if (major < 35) {
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
		return;
	}

	snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
}
862 #if 0
/*
 * Allocate the largest possible DMA buffer for the firmware monitor,
 * trying power-of-two sizes from 2^max_power down to 2^min_power bytes.
 * Returns 0 on success (or if a buffer already exists) or the last
 * allocation errno on failure.
 *
 * NOTE(review): this function is inside an #if 0 block (compiled out).
 * Before enabling it, confirm two issues: (1) if max_power < min_power
 * the loop body never runs and 'err' is read uninitialized below;
 * (2) 'power' is uint8_t, so min_power == 0 would wrap around and loop
 * forever.  The only caller passes min_power == 11, which avoids both.
 */
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	/* Keep an already-allocated monitor buffer. */
	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	/* Tell the user we had to settle for a smaller buffer. */
	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}
901
/*
 * Allocate a firmware monitor buffer from a TLV-encoded size exponent.
 * A zero max_power selects the default maximum (2^26 bytes); otherwise
 * the TLV value is biased by 11 so the buffer is 2^(max_power+11)
 * bytes, with 2^11 as the smallest size tried.
 *
 * Note: this function is inside an #if 0 block (compiled out).
 * Returns 0 (oversized requests are ignored, not treated as errors)
 * or the result of iwx_alloc_fw_monitor_block().
 */
static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	/* Keep an already-allocated monitor buffer. */
	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
924 #endif
925
/*
 * Apply the firmware debug-destination TLV: allocate an external
 * monitor buffer if requested and replay the TLV's register operation
 * list (CSR/periphery assignments and bit manipulation).
 *
 * The whole body is currently compiled out (#if 0); the function is a
 * no-op that returns 0.
 *
 * NOTE(review): before enabling this code, note that the PRPH_SETBIT /
 * PRPH_CLEARBIT error paths return while the NIC lock acquired above
 * is still held -- looks like a missing iwx_nic_unlock(); confirm.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	/* External mode needs a DMA buffer for the monitor to write into. */
	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Replay the TLV's register-operation list. */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			/* Stop replaying ops once the block bit is set. */
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	/* Point the device at the allocated external monitor buffer. */
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1012
1013 static void
iwx_set_ltr(struct iwx_softc * sc)1014 iwx_set_ltr(struct iwx_softc *sc)
1015 {
1016 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1017 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1018 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1019 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1020 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1021 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1022 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1023 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1024 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1025 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1026 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1027
1028 /*
1029 * To workaround hardware latency issues during the boot process,
1030 * initialize the LTR to ~250 usec (see ltr_val above).
1031 * The firmware initializes this again later (to a smaller value).
1032 */
1033 if (!sc->sc_integrated) {
1034 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1035 } else if (sc->sc_integrated &&
1036 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1037 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1038 IWX_HPM_MAC_LRT_ENABLE_ALL);
1039 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1040 }
1041 }
1042
/*
 * First-generation (pre-AX210) context-info setup: fill in the context
 * descriptor that tells the device where the RX ring, the command TX
 * queue and the ucode sections live in host memory, then kick the
 * firmware self-load.
 *
 * Returns 0 on success, EBUSY if the NIC cannot be locked, or an errno
 * from firmware-section allocation.  Any allocated section DMA memory
 * is released on failure; on success it is released later, upon the
 * ALIVE notification (or a failure to receive one).
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/*
	 * NOTE(review): the assertion bounds the encoded RX circular-buffer
	 * size so it fits the control-flags field below, but the message
	 * text says "rate table size" -- looks like a copy/paste slip in
	 * the wording; confirm.
	 */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1123
/*
 * Gen3 (AX210+) context-info setup: build the periphery scratch area
 * and the gen3 context descriptor, copy the image loader (IML) into
 * DMA memory, point the device at all of it, and kick the firmware
 * self-load.
 *
 * Returns 0 on success, EINVAL if the firmware file carries no IML,
 * ENOMEM/EBUSY/errno on allocation or locking failures.  All DMA
 * memory allocated here is released on failure; on success it is
 * released upon the ALIVE notification (or a failure to receive one).
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 devices cannot boot without an image loader blob. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	/* Fill in the periphery scratch control/config area. */
	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill in the gen3 context descriptor: addresses of the periphery
	 * info/scratch pages, the RX status and ring tail arrays, and the
	 * command-queue (mtr) / used-RBD (mcr) rings with their sizes.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* 64-bit addresses are written as two 32-bit halves. */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1228
1229 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1230 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1231 {
1232 struct iwx_self_init_dram *dram = &sc->init_dram;
1233 int i;
1234
1235 if (!dram->fw)
1236 return;
1237
1238 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1239 iwx_dma_contig_free(&dram->fw[i]);
1240
1241 free(dram->fw, M_DEVBUF);
1242 dram->lmac_cnt = 0;
1243 dram->umac_cnt = 0;
1244 dram->fw = NULL;
1245 }
1246
1247 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1248 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1249 const uint8_t *data, size_t dlen)
1250 {
1251 struct iwx_fw_sects *fws;
1252 struct iwx_fw_onesect *fwone;
1253
1254 if (type >= IWX_UCODE_TYPE_MAX)
1255 return EINVAL;
1256 if (dlen < sizeof(uint32_t))
1257 return EINVAL;
1258
1259 fws = &sc->sc_fw.fw_sects[type];
1260 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1261 "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1262 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1263 return EINVAL;
1264
1265 fwone = &fws->fw_sect[fws->fw_count];
1266
1267 /* first 32bit are device load offset */
1268 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1269
1270 /* rest is data */
1271 fwone->fws_data = data + sizeof(uint32_t);
1272 fwone->fws_len = dlen - sizeof(uint32_t);
1273
1274 fws->fw_count++;
1275 fws->fw_totlen += fwone->fws_len;
1276
1277 return 0;
1278 }
1279
/* Scan-channel count assumed unless the firmware's TLVs say otherwise. */
#define IWX_DEFAULT_SCAN_CHANNELS 40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */

/* Wire layout of the IWX_UCODE_TLV_DEF_CALIB payload. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;		/* ucode image the defaults apply to */
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1288
1289 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1290 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1291 {
1292 const struct iwx_tlv_calib_data *def_calib = data;
1293 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1294
1295 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1296 return EINVAL;
1297
1298 sc->sc_default_calib[ucode_type].flow_trigger =
1299 def_calib->calib.flow_trigger;
1300 sc->sc_default_calib[ucode_type].event_trigger =
1301 def_calib->calib.event_trigger;
1302
1303 return 0;
1304 }
1305
1306 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1307 iwx_fw_info_free(struct iwx_fw_info *fw)
1308 {
1309 free(fw->fw_rawdata, M_DEVBUF);
1310 fw->fw_rawdata = NULL;
1311 fw->fw_rawsize = 0;
1312 /* don't touch fw->fw_status */
1313 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1314 free(fw->iml, M_DEVBUF);
1315 fw->iml = NULL;
1316 fw->iml_len = 0;
1317 }
1318
/*
 * High address bits set by firmware-provided table pointers; masked off
 * before the address is used (presumably cache-control flags -- TODO
 * confirm against Intel firmware documentation).
 */
#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1320
/*
 * Load the firmware image via firmware(9) and parse its TLV structure,
 * recording ucode sections, capability and API flags, calibration
 * defaults, debug destinations, the image loader (IML) and command
 * versions in the softc.
 *
 * Returns 0 and sets fw->fw_status to IWX_FW_STATUS_DONE on success;
 * on failure returns an errno and resets fw_status to
 * IWX_FW_STATUS_NONE after releasing any partially parsed state.
 */
static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	/* Firmware already loaded and parsed; nothing to do. */
	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);


	/* Reset all capability state before (re)parsing. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	/* A valid TLV image starts with a zero word and the TLV magic. */
	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	/* Walk the TLV stream; each entry is a (type, length) header + payload. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			/* Presence-only TLV; must carry no payload. */
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug? Observations from
			 * current firmware file:
			 * 1) TLV_PAN is parsed first
			 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Only 1- and 2-CPU images are supported. */
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Each TLV carries one 32-bit window of API flag bits. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			/*
			 * NOTE(review): unlike the API_CHANGES_SET case above,
			 * an out-of-range index jumps to parse_out with err
			 * still 0, which silently stops parsing the rest of
			 * the TLV stream without failing the load -- confirm
			 * this is intentional (tolerating newer firmware).
			 */
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			/* Validated but otherwise unused here. */
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			/* Overrides the version derived from uhdr->ver above. */
			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			/* Only debug-destination version 0 is understood. */
			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			/* Keep the first destination TLV; ignore duplicates. */
			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (const void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg);
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;

			/* Needs a destination TLV first; keep first conf per id. */
			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "Found debug configuration: %d\n", conf->id);
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			const struct iwx_umac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
			    le32toh(dbg_ptrs->error_info_addr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			const struct iwx_lmac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
			    le32toh(dbg_ptrs->error_event_table_ptr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			/* Replace any IML from an earlier TLV. */
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF);
				fw->iml_len = 0;
			}
			/*
			 * NOTE(review): M_WAITOK cannot return NULL, so the
			 * ENOMEM path below is dead code -- confirm intent.
			 */
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAITOK | M_ZERO);
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			/* Round down to a whole number of entries (tolerate padding). */
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
		case IWX_UCODE_TLV_FW_NUM_BEACONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
		case IWX_UCODE_TLV_CURRENT_PC:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
		case 0x101:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
		case 0x100000c:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
		case 69:
			break;

		default:
			/* Unknown TLV types are fatal: refuse the image. */
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		/* TLV payloads are padded to 4-byte alignment. */
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0, ("unhandled fw parse error"));

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	return err;
}
1761
1762 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1763 iwx_prph_addr_mask(struct iwx_softc *sc)
1764 {
1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1766 return 0x00ffffff;
1767 else
1768 return 0x000fffff;
1769 }
1770
1771 static uint32_t
iwx_read_prph_unlocked(struct iwx_softc * sc,uint32_t addr)1772 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1773 {
1774 uint32_t mask = iwx_prph_addr_mask(sc);
1775 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1776 IWX_BARRIER_READ_WRITE(sc);
1777 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1778 }
1779
1780 uint32_t
iwx_read_prph(struct iwx_softc * sc,uint32_t addr)1781 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1782 {
1783 iwx_nic_assert_locked(sc);
1784 return iwx_read_prph_unlocked(sc, addr);
1785 }
1786
1787 static void
iwx_write_prph_unlocked(struct iwx_softc * sc,uint32_t addr,uint32_t val)1788 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1789 {
1790 uint32_t mask = iwx_prph_addr_mask(sc);
1791 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1792 IWX_BARRIER_WRITE(sc);
1793 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1794 }
1795
1796 static void
iwx_write_prph(struct iwx_softc * sc,uint32_t addr,uint32_t val)1797 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1798 {
1799 iwx_nic_assert_locked(sc);
1800 iwx_write_prph_unlocked(sc, addr, val);
1801 }
1802
1803 static uint32_t
iwx_read_umac_prph(struct iwx_softc * sc,uint32_t addr)1804 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1805 {
1806 return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1807 }
1808
1809 static void
iwx_write_umac_prph(struct iwx_softc * sc,uint32_t addr,uint32_t val)1810 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1811 {
1812 iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1813 }
1814
1815 static int
iwx_read_mem(struct iwx_softc * sc,uint32_t addr,void * buf,int dwords)1816 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1817 {
1818 int offs, err = 0;
1819 uint32_t *vals = buf;
1820
1821 if (iwx_nic_lock(sc)) {
1822 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1823 for (offs = 0; offs < dwords; offs++)
1824 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1825 iwx_nic_unlock(sc);
1826 } else {
1827 err = EBUSY;
1828 }
1829 return err;
1830 }
1831
1832 static int
iwx_poll_bit(struct iwx_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)1833 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1834 int timo)
1835 {
1836 for (;;) {
1837 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1838 return 1;
1839 }
1840 if (timo < 10) {
1841 return 0;
1842 }
1843 timo -= 10;
1844 DELAY(10);
1845 }
1846 }
1847
/*
 * Acquire the (recursive) NIC access lock: request MAC access and wait
 * for the device to wake up so that CSR/PRPH registers may be used.
 * Returns 1 on success, 0 if the device did not become accessible.
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	/* Recursive acquisition: just bump the counter. */
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Ask the device to stay awake for host access. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	/*
	 * Wait (up to 150ms) for MAC clock to become ready and the device
	 * to not be on its way to sleep.
	 */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	    | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1873
1874 static void
iwx_nic_assert_locked(struct iwx_softc * sc)1875 iwx_nic_assert_locked(struct iwx_softc *sc)
1876 {
1877 if (sc->sc_nic_locks <= 0)
1878 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1879 }
1880
1881 static void
iwx_nic_unlock(struct iwx_softc * sc)1882 iwx_nic_unlock(struct iwx_softc *sc)
1883 {
1884 if (sc->sc_nic_locks > 0) {
1885 if (--sc->sc_nic_locks == 0)
1886 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1887 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1888 } else
1889 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1890 }
1891
1892 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1894 uint32_t mask)
1895 {
1896 uint32_t val;
1897
1898 if (iwx_nic_lock(sc)) {
1899 val = iwx_read_prph(sc, reg) & mask;
1900 val |= bits;
1901 iwx_write_prph(sc, reg, val);
1902 iwx_nic_unlock(sc);
1903 return 0;
1904 }
1905 return EBUSY;
1906 }
1907
1908 static int
iwx_set_bits_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits)1909 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1910 {
1911 return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1912 }
1913
1914 static int
iwx_clear_bits_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits)1915 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1916 {
1917 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1918 }
1919
1920 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1922 {
1923 if (error != 0)
1924 return;
1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1926 *(bus_addr_t *)arg = segs[0].ds_addr;
1927 }
1928
/*
 * Allocate a physically contiguous, coherent DMA buffer of 'size' bytes
 * with the given alignment, below 4GB. On success dma->{tag,map,vaddr,
 * paddr,size} are valid; on failure everything is torn down and an
 * errno is returned. Owner must release with iwx_dma_contig_free().
 */
static int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single segment, 32-bit addressable (device DMA restriction). */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Free memory before the generic teardown; clear vaddr so
		 * iwx_dma_contig_free() does not free it twice. */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwx_dma_contig_free(dma);
	return error;
}
1967
/*
 * Release a buffer obtained from iwx_dma_contig_alloc(). Safe to call
 * on a partially constructed iwx_dma_info (NULL vaddr and/or tag).
 * Teardown order: sync, unload, free memory, then destroy the tag.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1983
1984 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1985 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986 {
1987 bus_size_t size;
1988 int i, err;
1989
1990 ring->cur = 0;
1991
1992 /* Allocate RX descriptors (256-byte aligned). */
1993 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1994 size = sizeof(struct iwx_rx_transfer_desc);
1995 else
1996 size = sizeof(uint64_t);
1997 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1998 size * IWX_RX_MQ_RING_COUNT, 256);
1999 if (err) {
2000 device_printf(sc->sc_dev,
2001 "could not allocate RX ring DMA memory\n");
2002 goto fail;
2003 }
2004 ring->desc = ring->free_desc_dma.vaddr;
2005
2006 /* Allocate RX status area (16-byte aligned). */
2007 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2008 size = sizeof(uint16_t);
2009 else
2010 size = sizeof(*ring->stat);
2011 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2012 if (err) {
2013 device_printf(sc->sc_dev,
2014 "could not allocate RX status DMA memory\n");
2015 goto fail;
2016 }
2017 ring->stat = ring->stat_dma.vaddr;
2018
2019 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2020 size = sizeof(struct iwx_rx_completion_desc);
2021 else
2022 size = sizeof(uint32_t);
2023 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2024 size * IWX_RX_MQ_RING_COUNT, 256);
2025 if (err) {
2026 device_printf(sc->sc_dev,
2027 "could not allocate RX ring DMA memory\n");
2028 goto fail;
2029 }
2030
2031 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2032 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2033 0, NULL, NULL, &ring->data_dmat);
2034
2035 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2036 struct iwx_rx_data *data = &ring->data[i];
2037
2038 memset(data, 0, sizeof(*data));
2039 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2040 if (err) {
2041 device_printf(sc->sc_dev,
2042 "could not create RX buf DMA map\n");
2043 goto fail;
2044 }
2045
2046 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2047 if (err)
2048 goto fail;
2049 }
2050 return 0;
2051
2052 fail: iwx_free_rx_ring(sc, ring);
2053 return err;
2054 }
2055
/*
 * Stop the RX DMA engine (RFH) and wait up to ~10ms for it to report
 * idle. AX210+ devices use the GEN3 register set via the UMAC PRPH
 * window; older devices use the legacy RFH registers. Silently does
 * nothing if the NIC lock cannot be taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		/* Disable the DMA engine... */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* ...then poll (1000 x 10us) for the idle indication. */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
2081
2082 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2083 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2084 {
2085 ring->cur = 0;
2086 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2087 BUS_DMASYNC_PREWRITE);
2088 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2089 uint16_t *status = sc->rxq.stat_dma.vaddr;
2090 *status = 0;
2091 } else
2092 memset(ring->stat, 0, sizeof(*ring->stat));
2093 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2094 BUS_DMASYNC_POSTWRITE);
2095
2096 }
2097
2098 static void
iwx_free_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2099 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2100 {
2101 int i;
2102
2103 iwx_dma_contig_free(&ring->free_desc_dma);
2104 iwx_dma_contig_free(&ring->stat_dma);
2105 iwx_dma_contig_free(&ring->used_desc_dma);
2106
2107 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2108 struct iwx_rx_data *data = &ring->data[i];
2109 if (data->m != NULL) {
2110 bus_dmamap_sync(ring->data_dmat, data->map,
2111 BUS_DMASYNC_POSTREAD);
2112 bus_dmamap_unload(ring->data_dmat, data->map);
2113 m_freem(data->m);
2114 data->m = NULL;
2115 }
2116 if (data->map != NULL) {
2117 bus_dmamap_destroy(ring->data_dmat, data->map);
2118 data->map = NULL;
2119 }
2120 }
2121 if (ring->data_dmat != NULL) {
2122 bus_dma_tag_destroy(ring->data_dmat);
2123 ring->data_dmat = NULL;
2124 }
2125 }
2126
2127 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2128 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2129 {
2130 bus_addr_t paddr;
2131 bus_size_t size;
2132 int i, err;
2133 size_t bc_tbl_size;
2134 bus_size_t bc_align;
2135 size_t mapsize;
2136
2137 ring->qid = qid;
2138 ring->queued = 0;
2139 ring->cur = 0;
2140 ring->cur_hw = 0;
2141 ring->tail = 0;
2142 ring->tail_hw = 0;
2143
2144 /* Allocate TX descriptors (256-byte aligned). */
2145 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2146 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2147 if (err) {
2148 device_printf(sc->sc_dev,
2149 "could not allocate TX ring DMA memory\n");
2150 goto fail;
2151 }
2152 ring->desc = ring->desc_dma.vaddr;
2153
2154 /*
2155 * The hardware supports up to 512 Tx rings which is more
2156 * than we currently need.
2157 *
2158 * In DQA mode we use 1 command queue + 1 default queue for
2159 * management, control, and non-QoS data frames.
2160 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2161 *
2162 * Tx aggregation requires additional queues, one queue per TID for
2163 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2164 * Firmware may assign its own internal IDs for these queues
2165 * depending on which TID gets aggregation enabled first.
2166 * The driver maintains a table mapping driver-side queue IDs
2167 * to firmware-side queue IDs.
2168 */
2169
2170 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2171 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2172 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2173 bc_align = 128;
2174 } else {
2175 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2176 bc_align = 64;
2177 }
2178 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2179 bc_align);
2180 if (err) {
2181 device_printf(sc->sc_dev,
2182 "could not allocate byte count table DMA memory\n");
2183 goto fail;
2184 }
2185
2186 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2187 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2188 IWX_FIRST_TB_SIZE_ALIGN);
2189 if (err) {
2190 device_printf(sc->sc_dev,
2191 "could not allocate cmd DMA memory\n");
2192 goto fail;
2193 }
2194 ring->cmd = ring->cmd_dma.vaddr;
2195
2196 /* FW commands may require more mapped space than packets. */
2197 if (qid == IWX_DQA_CMD_QUEUE)
2198 mapsize = (sizeof(struct iwx_cmd_header) +
2199 IWX_MAX_CMD_PAYLOAD_SIZE);
2200 else
2201 mapsize = MCLBYTES;
2202 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2203 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2204 mapsize, 0, NULL, NULL, &ring->data_dmat);
2205
2206 paddr = ring->cmd_dma.paddr;
2207 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2208 struct iwx_tx_data *data = &ring->data[i];
2209
2210 data->cmd_paddr = paddr;
2211 paddr += sizeof(struct iwx_device_cmd);
2212
2213 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2214 if (err) {
2215 device_printf(sc->sc_dev,
2216 "could not create TX buf DMA map\n");
2217 goto fail;
2218 }
2219 }
2220 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2221 return 0;
2222
2223 fail:
2224 return err;
2225 }
2226
2227 static void
iwx_reset_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2228 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2229 {
2230 int i;
2231
2232 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2233 struct iwx_tx_data *data = &ring->data[i];
2234
2235 if (data->m != NULL) {
2236 bus_dmamap_sync(ring->data_dmat, data->map,
2237 BUS_DMASYNC_POSTWRITE);
2238 bus_dmamap_unload(ring->data_dmat, data->map);
2239 m_freem(data->m);
2240 data->m = NULL;
2241 }
2242 }
2243
2244 /* Clear byte count table. */
2245 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2246
2247 /* Clear TX descriptors. */
2248 memset(ring->desc, 0, ring->desc_dma.size);
2249 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2250 BUS_DMASYNC_PREWRITE);
2251 sc->qfullmsk &= ~(1 << ring->qid);
2252 sc->qenablemsk &= ~(1 << ring->qid);
2253 for (i = 0; i < nitems(sc->aggqid); i++) {
2254 if (sc->aggqid[i] == ring->qid) {
2255 sc->aggqid[i] = 0;
2256 break;
2257 }
2258 }
2259 ring->queued = 0;
2260 ring->cur = 0;
2261 ring->cur_hw = 0;
2262 ring->tail = 0;
2263 ring->tail_hw = 0;
2264 ring->tid = 0;
2265 }
2266
2267 static void
iwx_free_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2268 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2269 {
2270 int i;
2271
2272 iwx_dma_contig_free(&ring->desc_dma);
2273 iwx_dma_contig_free(&ring->cmd_dma);
2274 iwx_dma_contig_free(&ring->bc_tbl);
2275
2276 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2277 struct iwx_tx_data *data = &ring->data[i];
2278
2279 if (data->m != NULL) {
2280 bus_dmamap_sync(ring->data_dmat, data->map,
2281 BUS_DMASYNC_POSTWRITE);
2282 bus_dmamap_unload(ring->data_dmat, data->map);
2283 m_freem(data->m);
2284 data->m = NULL;
2285 }
2286 if (data->map != NULL) {
2287 bus_dmamap_destroy(ring->data_dmat, data->map);
2288 data->map = NULL;
2289 }
2290 }
2291 if (ring->data_dmat != NULL) {
2292 bus_dma_tag_destroy(ring->data_dmat);
2293 ring->data_dmat = NULL;
2294 }
2295 }
2296
/*
 * Unmask only the RF-kill interrupt cause, so the driver is notified
 * when the hardware RF-kill switch changes while everything else is
 * quiesced. In MSI-X mode a cause is ENABLED by CLEARING its mask bit,
 * hence the inverted write below.
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* Mask all FH causes; unmask only the HW RF-kill cause. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	/* Let an RF-kill toggle wake the PCIe link out of L1. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2314
2315 static int
iwx_check_rfkill(struct iwx_softc * sc)2316 iwx_check_rfkill(struct iwx_softc *sc)
2317 {
2318 uint32_t v;
2319 int rv;
2320
2321 /*
2322 * "documentation" is not really helpful here:
2323 * 27: HW_RF_KILL_SW
2324 * Indicates state of (platform's) hardware RF-Kill switch
2325 *
2326 * But apparently when it's off, it's on ...
2327 */
2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2330 if (rv) {
2331 sc->sc_flags |= IWX_FLAG_RFKILL;
2332 } else {
2333 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2334 }
2335
2336 return rv;
2337 }
2338
2339 static void
iwx_enable_interrupts(struct iwx_softc * sc)2340 iwx_enable_interrupts(struct iwx_softc *sc)
2341 {
2342 if (!sc->sc_msix) {
2343 sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2344 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2345 } else {
2346 /*
2347 * fh/hw_mask keeps all the unmasked causes.
2348 * Unlike msi, in msix cause is enabled when it is unset.
2349 */
2350 sc->sc_hw_mask = sc->sc_hw_init_mask;
2351 sc->sc_fh_mask = sc->sc_fh_init_mask;
2352 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2353 ~sc->sc_fh_mask);
2354 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2355 ~sc->sc_hw_mask);
2356 }
2357 }
2358
/*
 * During firmware load, unmask only what is needed to see the ALIVE
 * notification: the ALIVE cause plus the FH RX causes that deliver it.
 */
static void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* MSI-X: a cause is enabled when its mask bit is clear. */
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
2378
#if 0
/* Currently compiled out: re-arm the legacy (MSI) interrupt mask
 * previously saved in sc_intmask. */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2386
2387 static void
iwx_disable_interrupts(struct iwx_softc * sc)2388 iwx_disable_interrupts(struct iwx_softc *sc)
2389 {
2390 if (!sc->sc_msix) {
2391 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2392
2393 /* acknowledge all interrupts */
2394 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2395 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2396 } else {
2397 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2398 sc->sc_fh_init_mask);
2399 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2400 sc->sc_hw_init_mask);
2401 }
2402 }
2403
/*
 * Reset and (re)enable the Interrupt Cause Table: clear the table,
 * point the device at its DMA address, switch the driver into ICT
 * mode, and re-enable interrupts.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	/* Empty the table and rewind the driver's read index. */
	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Ack anything pending, then unmask. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2425
#define IWX_HW_READY_TIMEOUT 50
/*
 * Signal NIC_READY and poll (up to 50us) for the device to confirm it.
 * On success, also tell the device the OS is alive. Returns nonzero
 * when the hardware reported ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2447
/*
 * Bring the device to the "HW ready" state, retrying with the PREPARE
 * handshake if the first attempt fails. Returns 0 on success or
 * ETIMEDOUT. Note that 't' accumulates across outer iterations, so
 * the 150ms inner budget is only fully spent on the first pass; later
 * passes poll once each (do/while) with a 25ms pause in between.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2477
/*
 * Cycle the HPM power-gating configuration: force the core active,
 * enable power/sleep gating, then release the force bit. The 20us
 * delays between steps are part of the required sequence. Returns 0
 * or an errno from the underlying PRPH accessors (EBUSY if the NIC
 * lock could not be taken).
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2498
2499 static void
iwx_apm_config(struct iwx_softc * sc)2500 iwx_apm_config(struct iwx_softc *sc)
2501 {
2502 uint16_t lctl, cap;
2503 int pcie_ptr;
2504 int error;
2505
2506 /*
2507 * L0S states have been found to be unstable with our devices
2508 * and in newer hardware they are not officially supported at
2509 * all, so we must always set the L0S_DISABLED bit.
2510 */
2511 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2512
2513 error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
2514 if (error != 0) {
2515 printf("can't fill pcie_ptr\n");
2516 return;
2517 }
2518
2519 lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
2520 sizeof(lctl));
2521 #define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
2522 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2523 #define PCI_PCIE_DCSR2 0x28
2524 cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
2525 sizeof(lctl));
2526 #define PCI_PCIE_DCSR2_LTREN 0x00000400
2527 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2528 #define PCI_PCIE_LCSR_ASPM_L1 0x00000002
2529 DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2530 DEVNAME(sc),
2531 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2532 sc->sc_ltr_enabled ? "En" : "Dis"));
2533 #undef PCI_PCIE_LCSR_ASPM_L0S
2534 #undef PCI_PCIE_DCSR2
2535 #undef PCI_PCIE_DCSR2_LTREN
2536 #undef PCI_PCIE_LCSR_ASPM_L1
2537 }
2538
2539 /*
2540 * Start up NIC's basic functionality after it has been reset
2541 * e.g. after platform boot or shutdown.
2542 * NOTE: This does not load uCode nor start the embedded processor
2543 */
2544 static int
iwx_apm_init(struct iwx_softc * sc)2545 iwx_apm_init(struct iwx_softc *sc)
2546 {
2547 int err = 0;
2548
2549 /*
2550 * Disable L0s without affecting L1;
2551 * don't wait for ICH L0s (ICH bug W/A)
2552 */
2553 IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2554 IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2555
2556 /* Set FH wait threshold to maximum (HW error during stress W/A) */
2557 IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2558
2559 /*
2560 * Enable HAP INTA (interrupt from management bus) to
2561 * wake device's PCI Express link L1a -> L0s
2562 */
2563 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2564 IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2565
2566 iwx_apm_config(sc);
2567
2568 /*
2569 * Set "initialization complete" bit to move adapter from
2570 * D0U* --> D0A* (powered-up active) state.
2571 */
2572 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2573
2574 /*
2575 * Wait for clock stabilization; once stabilized, access to
2576 * device-internal resources is supported, e.g. iwx_write_prph()
2577 * and accesses to uCode SRAM.
2578 */
2579 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2580 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2581 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2582 printf("%s: timeout waiting for clock stabilization\n",
2583 DEVNAME(sc));
2584 err = ETIMEDOUT;
2585 goto out;
2586 }
2587 out:
2588 if (err)
2589 printf("%s: apm init error %d\n", DEVNAME(sc), err);
2590 return err;
2591 }
2592
/*
 * Power down the adapter: run the PREPARE/PME handshake, stop bus
 * master DMA and wait for it to quiesce, then drop INIT_DONE to move
 * the device from D0A back to the uninitialized D0U state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for bus master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2621
2622 static void
iwx_init_msix_hw(struct iwx_softc * sc)2623 iwx_init_msix_hw(struct iwx_softc *sc)
2624 {
2625 iwx_conf_msix_hw(sc, 0);
2626
2627 if (!sc->sc_msix)
2628 return;
2629
2630 sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2631 sc->sc_fh_mask = sc->sc_fh_init_mask;
2632 sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2633 sc->sc_hw_mask = sc->sc_hw_init_mask;
2634 }
2635
2636 static void
iwx_conf_msix_hw(struct iwx_softc * sc,int stopped)2637 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2638 {
2639 int vector = 0;
2640
2641 if (!sc->sc_msix) {
2642 /* Newer chips default to MSIX. */
2643 if (!stopped && iwx_nic_lock(sc)) {
2644 iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2645 IWX_UREG_CHICK_MSI_ENABLE);
2646 iwx_nic_unlock(sc);
2647 }
2648 return;
2649 }
2650
2651 if (!stopped && iwx_nic_lock(sc)) {
2652 iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2653 IWX_UREG_CHICK_MSIX_ENABLE);
2654 iwx_nic_unlock(sc);
2655 }
2656
2657 /* Disable all interrupts */
2658 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2659 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2660
2661 /* Map fallback-queue (command/mgmt) to a single vector */
2662 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2663 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2664 /* Map RSS queue (data) to the same vector */
2665 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2666 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2667
2668 /* Enable the RX queues cause interrupts */
2669 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2670 IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2671
2672 /* Map non-RX causes to the same vector */
2673 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2674 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2675 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2676 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2677 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2678 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2679 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2680 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2681 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2682 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2683 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2684 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2685 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2686 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2687 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2688 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2689 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2690 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2691 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2692 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2693 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2694 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2695 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2696 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2697 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2698 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2699 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2700 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2701 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2702 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2703
2704 /* Enable non-RX causes interrupts */
2705 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2706 IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2707 IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2708 IWX_MSIX_FH_INT_CAUSES_S2D |
2709 IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2710 IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2711 IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2712 IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2713 IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2714 IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2715 IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2716 IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2717 IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2718 IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2719 IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2720 IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2721 IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2722 }
2723
2724 static int
iwx_clear_persistence_bit(struct iwx_softc * sc)2725 iwx_clear_persistence_bit(struct iwx_softc *sc)
2726 {
2727 uint32_t hpm, wprot;
2728
2729 hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2730 if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2731 wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2732 if (wprot & IWX_PREG_WFPM_ACCESS) {
2733 printf("%s: cannot clear persistence bit\n",
2734 DEVNAME(sc));
2735 return EPERM;
2736 }
2737 iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2738 hpm & ~IWX_PERSISTENCE_BIT);
2739 }
2740
2741 return 0;
2742 }
2743
/*
 * Full hardware bring-up: prepare the card, clear the persistence bit
 * on 22000-family parts, software-reset the device (twice on
 * integrated 22000 parts, around a forced power-gating cycle), run
 * APM init, program MSI-X, and arm the RF-kill interrupt. Returns 0
 * or an errno from any failed step.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		/* Wake the device so power gating can be forced. */
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2796
/*
 * Stop the device: disable interrupts, tear down all DMA rings, stop the
 * APM, reset the on-board processor, and leave the hardware in a low-power
 * state with only the RF-kill interrupt armed.
 */
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging and PNVM DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2857
2858 static void
iwx_nic_config(struct iwx_softc * sc)2859 iwx_nic_config(struct iwx_softc *sc)
2860 {
2861 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2862 uint32_t mask, val, reg_val = 0;
2863
2864 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2865 IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2866 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2867 IWX_FW_PHY_CFG_RADIO_STEP_POS;
2868 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2869 IWX_FW_PHY_CFG_RADIO_DASH_POS;
2870
2871 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2872 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2873 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2874 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2875
2876 /* radio configuration */
2877 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2878 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2879 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2880
2881 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2882 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2883 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2884 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2885 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2886 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2887 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2888
2889 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2890 val &= ~mask;
2891 val |= reg_val;
2892 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2893 }
2894
/*
 * Minimal Rx-side hardware init: only the interrupt coalescing timer is
 * set here.  Always returns 0.
 */
static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2906
/*
 * Initialize the NIC prior to firmware load: APM, per-family NIC config,
 * Rx init, and shadow register enablement.
 */
static int
iwx_nic_init(struct iwx_softc *sc)
{
	int err;

	/*
	 * NOTE(review): the return value of iwx_apm_init() is ignored here,
	 * while iwx_start_hw() checks it — confirm this is intentional.
	 */
	iwx_apm_init(sc);
	/* AX210 and later do not use the legacy HW_IF_CONFIG setup. */
	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
		iwx_nic_config(sc);

	err = iwx_nic_rx_init(sc);
	if (err)
		return err;

	/* Enable shadow registers so the NIC mirrors CSR writes to DRAM. */
	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
2924
/*
 * Map ieee80211_edca_ac categories to firmware Tx FIFO.
 * Indexed by EDCA access category (BE, BK, VI, VO).
 */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2932
/*
 * Enable a firmware Tx queue for the given station/TID via the
 * SCD_QUEUE_CONFIG command (legacy v0 or v3 format, chosen by the
 * firmware-advertised command version).  The firmware's reply must agree
 * with the driver's queue ID and write pointer since we do not support
 * dynamic queue assignment.  Returns 0 on success or an errno.
 */
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring; firmware expects write pointer 0. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: pass DMA addresses of ring and BC table. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 command: ADD operation with a station bitmask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our (just reset) ring. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3025
/*
 * Disable a firmware Tx queue previously enabled with iwx_enable_txq(),
 * using the command format matching the firmware's SCD_QUEUE_CONFIG
 * version, then reset the corresponding driver ring.  Returns 0 on
 * success or an errno.
 */
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: zeroed addresses/sizes disable the queue. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 command: REMOVE operation keyed by station mask + TID. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3084
/*
 * Post-ALIVE setup: reset the ICT table and derive the rate_n_flags
 * format version from the firmware's TX_CMD notification version
 * (versions above 6 use the new rate_n_flags layout).
 */
static void
iwx_post_alive(struct iwx_softc *sc)
{
	int txcmd_ver;

	iwx_ict_reset(sc);

	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
		sc->sc_rate_n_flags_version = 2;
	else
		sc->sc_rate_n_flags_version = 1;

	/*
	 * NOTE(review): this second lookup's result is never used — the
	 * value is overwritten and the function returns.  Looks like a
	 * leftover; confirm whether iwx_lookup_cmd_ver() has side effects
	 * before removing.
	 */
	txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
}
3100
3101 static int
iwx_schedule_session_protection(struct iwx_softc * sc,struct iwx_node * in,uint32_t duration_tu)3102 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
3103 uint32_t duration_tu)
3104 {
3105
3106 struct iwx_session_prot_cmd cmd = {
3107 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3108 in->in_color)),
3109 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
3110 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3111 .duration_tu = htole32(duration_tu),
3112 };
3113 uint32_t cmd_id;
3114 int err;
3115
3116 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3117 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3118 if (!err)
3119 sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3120 return err;
3121 }
3122
3123 static void
iwx_unprotect_session(struct iwx_softc * sc,struct iwx_node * in)3124 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3125 {
3126 struct iwx_session_prot_cmd cmd = {
3127 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3128 in->in_color)),
3129 .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3130 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3131 .duration_tu = 0,
3132 };
3133 uint32_t cmd_id;
3134
3135 /* Do nothing if the time event has already ended. */
3136 if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3137 return;
3138
3139 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3140 if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3141 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3142 }
3143
3144 /*
3145 * NVM read access and content parsing. We do not support
3146 * external NVM or writing NVM.
3147 */
3148
3149 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3150 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3151 {
3152 uint8_t tx_ant;
3153
3154 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3155 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3156
3157 if (sc->sc_nvm.valid_tx_ant)
3158 tx_ant &= sc->sc_nvm.valid_tx_ant;
3159
3160 return tx_ant;
3161 }
3162
3163 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3164 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3165 {
3166 uint8_t rx_ant;
3167
3168 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3169 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3170
3171 if (sc->sc_nvm.valid_rx_ant)
3172 rx_ant &= sc->sc_nvm.valid_rx_ant;
3173
3174 return rx_ant;
3175 }
3176
/*
 * Populate net80211's channel list from the NVM channel profile returned
 * by the firmware (NVM_GET_INFO response, v3 or v4 layout).  2 GHz and
 * 5 GHz bands are filled in when enabled by the SKU; 6 GHz is not yet
 * supported.
 */
static void
iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];
	const uint8_t *nvm_channels;
	uint32_t ch_flags;
	int ch_idx, nchan;

	/* UHB (ultra-high-band) devices use a larger channel table. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	/* 2.4Ghz; 1-13: 11b/g channels. */
	if (!data->sku_cap_band_24GHz_enable)
		goto band_5;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	for (ch_idx = 0;
	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
	    ch_idx++) {

		uint32_t nflags = 0;
		int cflags = 0;

		/* v4 responses carry 32-bit channel flags, v3 16-bit. */
		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		} else {
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
		}
		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}

band_5:
	/* 5Ghz */
	if (!data->sku_cap_band_52GHz_enable)
		goto band_6;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11A);
	setbit(bands, IEEE80211_MODE_11NA);
	setbit(bands, IEEE80211_MODE_VHT_5GHZ);

	/* 5 GHz channels follow the 2 GHz block in the NVM profile. */
	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
	    ch_idx++) {
		uint32_t nflags = 0;
		int cflags = 0;

		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		else
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);

		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		/* Wider channel widths are possible on 5 GHz. */
		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;
		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT80;
		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT160;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}
band_6:
	/* 6GHz one day ... */
	return;
}
3279
3280 static int
iwx_mimo_enabled(struct iwx_softc * sc)3281 iwx_mimo_enabled(struct iwx_softc *sc)
3282 {
3283
3284 return !sc->sc_nvm.sku_cap_mimo_disable;
3285 }
3286
3287 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3289 uint16_t ssn, uint16_t buf_size)
3290 {
3291 reorder_buf->head_sn = ssn;
3292 reorder_buf->num_stored = 0;
3293 reorder_buf->buf_size = buf_size;
3294 reorder_buf->last_amsdu = 0;
3295 reorder_buf->last_sub_index = 0;
3296 reorder_buf->removed = 0;
3297 reorder_buf->valid = 0;
3298 reorder_buf->consec_oldsn_drops = 0;
3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3300 reorder_buf->consec_oldsn_prev_drop = 0;
3301 }
3302
3303 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3305 {
3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3307
3308 reorder_buf->removed = 1;
3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3310 }
3311
3312 #define IWX_MAX_RX_BA_SESSIONS 16
3313
3314 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3316 {
3317 int i;
3318
3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3320 if (sc->sc_rxba_data[i].baid ==
3321 IWX_RX_REORDER_DATA_INVALID_BAID)
3322 continue;
3323 if (sc->sc_rxba_data[i].tid == tid)
3324 return &sc->sc_rxba_data[i];
3325 }
3326
3327 return NULL;
3328 }
3329
/*
 * Configure an Rx BA session in firmware via the BAID allocation command.
 * When starting, the firmware allocates a BAID which is returned through
 * *baid; when stopping, the existing BAID for the TID is looked up and
 * removed (v1 removes by BAID, newer versions by station mask + TID).
 * Returns 0 on success or an errno.
 */
static int
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* Command version 1 removes by BAID, later by sta/TID. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* Firmware-chosen BAID must index sc_rxba_data[]. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3380
/*
 * Start or stop an Rx block-ack (aggregation) session for a TID and
 * update the driver's reorder-buffer state to match.  The session count
 * is capped at IWX_MAX_RX_BA_SESSIONS.  Hardware performs deaggregation;
 * the driver only tracks session metadata.
 */
static void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	/* Only firmware with ML BAID support is handled. */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		/* Slot already in use: nothing further to set up. */
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		/*
		 * NOTE(review): returning here skips the session counter
		 * bump below when a timeout is configured — confirm this
		 * is intended.
		 */
		if (timeout_val != 0) {
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3431
/*
 * Start Tx aggregation for a TID: pick (or reuse) a Tx queue, enable it
 * in firmware if needed, and record the qid in sc->aggqid[].  Firmware
 * handles the actual BA session negotiation.
 */
static void
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid)
{
	int err, qid;

	qid = sc->aggqid[tid];
	if (qid == 0) {
		/*
		 * Firmware should pick the next unused Tx queue.
		 * fls() of the enabled-queue mask yields the first queue
		 * index above the highest currently-enabled one.
		 */
		qid = fls(sc->qenablemsk);
	}

	DPRINTF(("%s: qid=%i\n", __func__, qid));

	/*
	 * Simply enable the queue.
	 * Firmware handles Tx Ba session setup and teardown.
	 */
	if ((sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwx_nic_lock(sc)) {
			return;
		}
		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
		    IWX_TX_RING_COUNT);
		iwx_nic_unlock(sc);
		if (err) {
			printf("%s: could not enable Tx queue %d "
			    "(error %d)\n", DEVNAME(sc), qid, err);
			return;
		}
	}
	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
	sc->aggqid[tid] = qid;
}
3467
/*
 * Deferred task: process pending Rx BA session start/stop requests
 * recorded in sc->ba_rx.{start,stop}_tidmask under the driver lock.
 */
static void
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			/*
			 * NOTE(review): this 'break' stops processing ALL
			 * remaining TIDs (and leaves this TID's start bit
			 * set) when one session is already established —
			 * confirm 'continue' was not intended.
			 */
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3504
3505 static void
iwx_ba_tx_task(void * arg,int npending __unused)3506 iwx_ba_tx_task(void *arg, int npending __unused)
3507 {
3508 struct iwx_softc *sc = arg;
3509 struct ieee80211com *ic = &sc->sc_ic;
3510 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3511 struct ieee80211_node *ni = vap->iv_bss;
3512 int tid;
3513
3514 IWX_LOCK(sc);
3515 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3516 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3517 break;
3518 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3519 DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
3520 tid));
3521 iwx_sta_tx_agg_start(sc, ni, tid);
3522 sc->ba_tx.start_tidmask &= ~(1 << tid);
3523 sc->sc_flags |= IWX_FLAG_AMPDUTX;
3524 }
3525 }
3526
3527 IWX_UNLOCK(sc);
3528 }
3529
3530 static void
iwx_set_mac_addr_from_csr(struct iwx_softc * sc,struct iwx_nvm_data * data)3531 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3532 {
3533 uint32_t mac_addr0, mac_addr1;
3534
3535 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3536
3537 if (!iwx_nic_lock(sc))
3538 return;
3539
3540 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3541 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3542
3543 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3544
3545 /* If OEM fused a valid address, use it instead of the one in OTP. */
3546 if (iwx_is_valid_mac_addr(data->hw_addr)) {
3547 iwx_nic_unlock(sc);
3548 return;
3549 }
3550
3551 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3552 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3553
3554 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3555
3556 iwx_nic_unlock(sc);
3557 }
3558
3559 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3560 iwx_is_valid_mac_addr(const uint8_t *addr)
3561 {
3562 static const uint8_t reserved_mac[] = {
3563 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3564 };
3565
3566 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3567 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3568 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3569 !ETHER_IS_MULTICAST(addr));
3570 }
3571
/*
 * Convert the two MAC-address CSR words (stored little-endian, with the
 * octets of each word reversed) into a 6-byte MAC address in 'dest'.
 * Only the low two octets of the second word are used.
 */
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	uint8_t w0[sizeof(mac_addr0)], w1[sizeof(mac_addr1)];

	memcpy(w0, &mac_addr0, sizeof(w0));
	memcpy(w1, &mac_addr1, sizeof(w1));

	/* First word holds octets 0-3, byte-reversed. */
	dest[0] = w0[3];
	dest[1] = w0[2];
	dest[2] = w0[1];
	dest[3] = w0[0];

	/* Second word holds octets 4-5, byte-reversed. */
	dest[4] = w1[1];
	dest[5] = w1[0];
}
3587
/*
 * Fetch NVM contents from the firmware via NVM_GET_INFO and parse the
 * response into sc->sc_nvm.  The raw response is also kept in
 * sc->sc_rsp_info (tagged v3 or v4) for later channel-map parsing.
 * Returns 0 on success or an errno.
 */
static int
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err = 0;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory. So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err) {
		printf("%s: failed to send cmd (error %d)", __func__, err);
		return err;
	}

	/* The response must exactly match the expected v3/v4 size. */
	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
		printf("%s: resp_len=%zu\n", __func__, resp_len);
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* MAC address comes from CSR registers, not from the response. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* LAR (location-aware regulatory) needs both NVM and fw support. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* Keep the raw response for iwx_init_channel_map(). */
	memcpy(&sc->sc_rsp_info, rsp, resp_len);
	if (v4) {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
	} else {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3677
/*
 * Upload the regular firmware image via the context-info mechanism and
 * wait (up to one second) for the firmware's ALIVE notification.
 * Returns 0 on success, or an errno when the upload or ALIVE wait fails.
 */
static int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	/* AX210+ uses the gen3 context info layout. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The IML and fw image DMA buffers are no longer needed. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3714
/*
 * Prepare the NIC and kick off the firmware load: clear pending
 * interrupts and rfkill handshake bits, run NIC init, then enable the
 * firmware-load interrupt and upload the image.
 */
static int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Acknowledge any pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3742
3743 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3744 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3745 size_t len)
3746 {
3747 const struct iwx_ucode_tlv *tlv;
3748 uint32_t sha1 = 0;
3749 uint16_t mac_type = 0, rf_id = 0;
3750 uint8_t *pnvm_data = NULL, *tmp;
3751 int hw_match = 0;
3752 uint32_t size = 0;
3753 int err;
3754
3755 while (len >= sizeof(*tlv)) {
3756 uint32_t tlv_len, tlv_type;
3757
3758 len -= sizeof(*tlv);
3759 tlv = (const void *)data;
3760
3761 tlv_len = le32toh(tlv->length);
3762 tlv_type = le32toh(tlv->type);
3763
3764 if (len < tlv_len) {
3765 printf("%s: invalid TLV len: %zd/%u\n",
3766 DEVNAME(sc), len, tlv_len);
3767 err = EINVAL;
3768 goto out;
3769 }
3770
3771 data += sizeof(*tlv);
3772
3773 switch (tlv_type) {
3774 case IWX_UCODE_TLV_PNVM_VERSION:
3775 if (tlv_len < sizeof(uint32_t))
3776 break;
3777
3778 sha1 = le32_to_cpup((const uint32_t *)data);
3779 break;
3780 case IWX_UCODE_TLV_HW_TYPE:
3781 if (tlv_len < 2 * sizeof(uint16_t))
3782 break;
3783
3784 if (hw_match)
3785 break;
3786
3787 mac_type = le16_to_cpup((const uint16_t *)data);
3788 rf_id = le16_to_cpup((const uint16_t *)(data +
3789 sizeof(uint16_t)));
3790
3791 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3792 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3793 hw_match = 1;
3794 break;
3795 case IWX_UCODE_TLV_SEC_RT: {
3796 const struct iwx_pnvm_section *section;
3797 uint32_t data_len;
3798
3799 section = (const void *)data;
3800 data_len = tlv_len - sizeof(*section);
3801
3802 /* TODO: remove, this is a deprecated separator */
3803 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3804 break;
3805
3806 tmp = malloc(size + data_len, M_DEVBUF,
3807 M_WAITOK | M_ZERO);
3808 if (tmp == NULL) {
3809 err = ENOMEM;
3810 goto out;
3811 }
3812 // XXX:misha pnvm_data is NULL and size is 0 at first pass
3813 memcpy(tmp, pnvm_data, size);
3814 memcpy(tmp + size, section->data, data_len);
3815 free(pnvm_data, M_DEVBUF);
3816 pnvm_data = tmp;
3817 size += data_len;
3818 break;
3819 }
3820 case IWX_UCODE_TLV_PNVM_SKU:
3821 /* New PNVM section started, stop parsing. */
3822 goto done;
3823 default:
3824 break;
3825 }
3826
3827 if (roundup(tlv_len, 4) > len)
3828 break;
3829 len -= roundup(tlv_len, 4);
3830 data += roundup(tlv_len, 4);
3831 }
3832 done:
3833 if (!hw_match || size == 0) {
3834 err = ENOENT;
3835 goto out;
3836 }
3837
3838 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3839 if (err) {
3840 printf("%s: could not allocate DMA memory for PNVM\n",
3841 DEVNAME(sc));
3842 err = ENOMEM;
3843 goto out;
3844 }
3845 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3846 iwx_ctxt_info_gen3_set_pnvm(sc);
3847 sc->sc_pnvm_ver = sha1;
3848 out:
3849 free(pnvm_data, M_DEVBUF);
3850 return err;
3851 }
3852
/*
 * Walk the top-level TLV stream of a PNVM firmware image looking for a
 * PNVM_SKU TLV whose SKU id matches this device's; when one matches,
 * hand the data following it to iwx_pnvm_handle_section().
 *
 * Returns 0 when a matching section was loaded, EINVAL on a malformed
 * TLV, ENOENT when no matching SKU section exists.
 */
static int
iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
{
	const struct iwx_ucode_tlv *tlv;

	while (len >= sizeof(*tlv)) {
		uint32_t tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);

		/* TLV payloads are padded to 4-byte alignment. */
		if (len < tlv_len || roundup(tlv_len, 4) > len)
			return EINVAL;

		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
			const struct iwx_sku_id *sku_id =
			    (const void *)(data + sizeof(*tlv));

			/* Advance past the SKU TLV itself before matching. */
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);

			/*
			 * The remainder of the stream (up to the next
			 * PNVM_SKU TLV) describes this SKU; try to load it.
			 */
			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
			    iwx_pnvm_handle_section(sc, data, len) == 0)
				return 0;
		} else {
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);
		}
	}

	return ENOENT;
}
3890
3891 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3892 static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc * sc)3893 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3894 {
3895 struct iwx_prph_scratch *prph_scratch;
3896 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3897
3898 prph_scratch = sc->prph_scratch_dma.vaddr;
3899 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3900
3901 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3902 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3903
3904 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3905 }
3906
3907 /*
3908 * Load platform-NVM (non-volatile-memory) data from the filesystem.
3909 * This data apparently contains regulatory information and affects device
3910 * channel configuration.
3911 * The SKU of AX210 devices tells us which PNVM file section is needed.
3912 * Pre-AX210 devices store NVM data onboard.
3913 */
3914 static int
iwx_load_pnvm(struct iwx_softc * sc)3915 iwx_load_pnvm(struct iwx_softc *sc)
3916 {
3917 const int wait_flags = IWX_PNVM_COMPLETE;
3918 int err = 0;
3919 const struct firmware *pnvm;
3920
3921 if (sc->sc_sku_id[0] == 0 &&
3922 sc->sc_sku_id[1] == 0 &&
3923 sc->sc_sku_id[2] == 0)
3924 return 0;
3925
3926 if (sc->sc_pnvm_name) {
3927 if (sc->pnvm_dma.vaddr == NULL) {
3928 IWX_UNLOCK(sc);
3929 pnvm = firmware_get(sc->sc_pnvm_name);
3930 if (pnvm == NULL) {
3931 printf("%s: could not read %s (error %d)\n",
3932 DEVNAME(sc), sc->sc_pnvm_name, err);
3933 IWX_LOCK(sc);
3934 return EINVAL;
3935 }
3936 sc->sc_pnvm = pnvm;
3937
3938 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3939 IWX_LOCK(sc);
3940 if (err && err != ENOENT) {
3941 return EINVAL;
3942 }
3943 } else
3944 iwx_ctxt_info_gen3_set_pnvm(sc);
3945 }
3946
3947 if (!iwx_nic_lock(sc)) {
3948 return EBUSY;
3949 }
3950
3951 /*
3952 * If we don't have a platform NVM file simply ask firmware
3953 * to proceed without it.
3954 */
3955
3956 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3957 IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3958
3959 /* Wait for the pnvm complete notification from firmware. */
3960 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3961 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3962 if (err)
3963 break;
3964 }
3965
3966 iwx_nic_unlock(sc);
3967
3968 return err;
3969 }
3970
3971 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)3972 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3973 {
3974 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3975 .valid = htole32(valid_tx_ant),
3976 };
3977
3978 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3979 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3980 }
3981
3982 static int
iwx_send_phy_cfg_cmd(struct iwx_softc * sc)3983 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3984 {
3985 struct iwx_phy_cfg_cmd phy_cfg_cmd;
3986
3987 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3988 phy_cfg_cmd.calib_control.event_trigger =
3989 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3990 phy_cfg_cmd.calib_control.flow_trigger =
3991 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3992
3993 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3994 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3995 }
3996
3997 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)3998 iwx_send_dqa_cmd(struct iwx_softc *sc)
3999 {
4000 struct iwx_dqa_enable_cmd dqa_cmd = {
4001 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4002 };
4003 uint32_t cmd_id;
4004
4005 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4006 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4007 }
4008
4009 static int
iwx_load_ucode_wait_alive(struct iwx_softc * sc)4010 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4011 {
4012 int err;
4013
4014 IWX_UNLOCK(sc);
4015 err = iwx_read_firmware(sc);
4016 IWX_LOCK(sc);
4017 if (err)
4018 return err;
4019
4020 err = iwx_start_fw(sc);
4021 if (err)
4022 return err;
4023
4024 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4025 err = iwx_load_pnvm(sc);
4026 if (err)
4027 return err;
4028 }
4029
4030 iwx_post_alive(sc);
4031
4032 return 0;
4033 }
4034
/*
 * Run the firmware's init flow: load the ucode, send the extended init
 * configuration, complete NVM access, and wait for the init-complete
 * notification. When 'readnvm' is set, also read the NVM and install
 * the MAC address from it. Called with the driver lock held (msleep
 * below sleeps on sc_mtx).
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* Refuse to start while the RF-kill switch is engaged. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		/* 2-second timeout per wakeup; interrupt path sets the flag. */
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		/* Adopt the factory MAC address stored in the NVM. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4107
4108 static int
iwx_config_ltr(struct iwx_softc * sc)4109 iwx_config_ltr(struct iwx_softc *sc)
4110 {
4111 struct iwx_ltr_config_cmd cmd = {
4112 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4113 };
4114
4115 if (!sc->sc_ltr_enabled)
4116 return 0;
4117
4118 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4119 }
4120
4121 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4122 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4123 bus_dma_segment_t *seg)
4124 {
4125 struct iwx_rx_data *data = &ring->data[idx];
4126
4127 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4128 struct iwx_rx_transfer_desc *desc = ring->desc;
4129 desc[idx].rbid = htole16(idx & 0xffff);
4130 desc[idx].addr = htole64((*seg).ds_addr);
4131 bus_dmamap_sync(ring->data_dmat, data->map,
4132 BUS_DMASYNC_PREWRITE);
4133 } else {
4134 ((uint64_t *)ring->desc)[idx] =
4135 htole64((*seg).ds_addr);
4136 bus_dmamap_sync(ring->data_dmat, data->map,
4137 BUS_DMASYNC_PREWRITE);
4138 }
4139 }
4140
/*
 * Allocate an mbuf cluster for RX slot 'idx', DMA-map it, and point the
 * RX ring descriptor at it. Returns ENOBUFS when no mbuf is available,
 * or the bus_dma error. Panics if a previously-loaded map cannot be
 * re-loaded (the old mapping has already been torn down at that point).
 */
static int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;
	bus_dma_segment_t seg;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	/* Replacing an existing buffer: unload its mapping first. */
	if (data->m != NULL) {
		bus_dmamap_unload(ring->data_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		/* The slot's old mapping is already gone; cannot recover. */
		if (fatal)
			panic("could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx, &seg);
	return 0;
}
4178
4179 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4180 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4181 struct iwx_rx_mpdu_desc *desc)
4182 {
4183 int energy_a, energy_b;
4184
4185 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4186 energy_a = desc->v3.energy_a;
4187 energy_b = desc->v3.energy_b;
4188 } else {
4189 energy_a = desc->v1.energy_a;
4190 energy_b = desc->v1.energy_b;
4191 }
4192 energy_a = energy_a ? -energy_a : -256;
4193 energy_b = energy_b ? -energy_b : -256;
4194 return MAX(energy_a, energy_b);
4195 }
4196
4197 static int
iwx_rxmq_get_chains(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4198 iwx_rxmq_get_chains(struct iwx_softc *sc,
4199 struct iwx_rx_mpdu_desc *desc)
4200 {
4201
4202 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4203 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4204 IWX_RATE_MCS_ANT_POS);
4205 else
4206 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4207 IWX_RATE_MCS_ANT_POS);
4208 }
4209
/*
 * Handle an RX PHY notification: stash the PHY info so the following
 * MPDU notification can use it.
 * NOTE(review): the sync below targets the TX ring selected by the
 * command header's qid, with PREREAD — presumably to flush the buffer
 * before reading phy_info; confirm this is the intended map/op.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	/* Save for use by the next RX MPDU handler. */
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4222
4223 /*
4224 * Retrieve the average noise (in dBm) among receivers.
4225 */
4226 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4227 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4228 {
4229 int i, total, nbant, noise;
4230
4231 total = nbant = noise = 0;
4232 for (i = 0; i < 3; i++) {
4233 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4234 if (noise) {
4235 total += noise;
4236 nbant++;
4237 }
4238 }
4239
4240 /* There should be at least one antenna but check anyway. */
4241 return (nbant == 0) ? -127 : (total / nbant) - 107;
4242 }
4243
#if 0
/*
 * NOTE(review): disabled code inherited from OpenBSD. Performs CCMP PN
 * (replay) checking for hardware-decrypted frames. It relies on OpenBSD
 * net80211 interfaces (ieee80211_get_rxkey, k->k_rsc, ieee80211_rxinfo)
 * that have no direct FreeBSD equivalents; kept for reference until the
 * FreeBSD hardware-decrypt path is completed.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is be set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0] |
	    (uint64_t)ivp[1] << 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4303
4304 static int
iwx_rx_hwdecrypt(struct iwx_softc * sc,struct mbuf * m,uint32_t rx_pkt_status)4305 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4306 {
4307 struct ieee80211_frame *wh;
4308 int ret = 0;
4309 uint8_t type, subtype;
4310
4311 wh = mtod(m, struct ieee80211_frame *);
4312
4313 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4314 if (type == IEEE80211_FC0_TYPE_CTL) {
4315 return 0;
4316 }
4317
4318 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4319 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4320 return 0;
4321 }
4322
4323
4324 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4325 IEEE80211_FC0_TYPE_CTL)
4326 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4327 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4328 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4329 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4330 ret = 1;
4331 goto out;
4332 }
4333 /* Check whether decryption was successful or not. */
4334 if ((rx_pkt_status &
4335 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4336 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4337 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4338 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4339 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4340 ret = 1;
4341 goto out;
4342 }
4343 }
4344 out:
4345 return ret;
4346 }
4347
/*
 * Deliver a received frame to net80211: map the hardware channel index
 * to an ic_channels entry, fill in radiotap when active, and hand the
 * mbuf to ieee80211_input_mimo{,_all}(). Drops and re-acquires the
 * driver lock around the net80211 input call.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* The hardware value is an IEEE channel number; find its index. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0	/* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* rate_n_flags encoding changed with API version 2. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			/* HT/VHT rate: radiotap MCS encoding (0x80 | mcs). */
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map hardware legacy rate codes to 500kb/s units. */
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate = 0;
			}
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may recurse into the driver; call it unlocked. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4483
4484 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4485 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4486 size_t maxlen)
4487 {
4488 struct ieee80211com *ic = &sc->sc_ic;
4489 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4490 struct ieee80211_node *ni = vap->iv_bss;
4491 struct ieee80211_key *k;
4492 struct ieee80211_rx_stats rxs;
4493 struct iwx_rx_mpdu_desc *desc;
4494 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4495 int rssi;
4496 uint8_t chanidx;
4497 uint16_t phy_info;
4498 size_t desc_size;
4499 int pad = 0;
4500
4501 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4502 desc_size = sizeof(*desc);
4503 else
4504 desc_size = IWX_RX_DESC_SIZE_V1;
4505
4506 if (maxlen < desc_size) {
4507 m_freem(m);
4508 return; /* drop */
4509 }
4510
4511 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4512
4513 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4514 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4515 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4516 m_freem(m);
4517 return; /* drop */
4518 }
4519
4520 len = le16toh(desc->mpdu_len);
4521 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4522 /* Allow control frames in monitor mode. */
4523 if (len < sizeof(struct ieee80211_frame_cts)) {
4524 m_freem(m);
4525 return;
4526 }
4527
4528 } else if (len < sizeof(struct ieee80211_frame)) {
4529 m_freem(m);
4530 return;
4531 }
4532 if (len > maxlen - desc_size) {
4533 m_freem(m);
4534 return;
4535 }
4536
4537 // TODO: arithmetic on a pointer to void is a GNU extension
4538 m->m_data = (char *)pktdata + desc_size;
4539 m->m_pkthdr.len = m->m_len = len;
4540
4541 /* Account for padding following the frame header. */
4542 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4543 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4544 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4545 if (type == IEEE80211_FC0_TYPE_CTL) {
4546 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4547 case IEEE80211_FC0_SUBTYPE_CTS:
4548 hdrlen = sizeof(struct ieee80211_frame_cts);
4549 break;
4550 case IEEE80211_FC0_SUBTYPE_ACK:
4551 hdrlen = sizeof(struct ieee80211_frame_ack);
4552 break;
4553 default:
4554 hdrlen = sizeof(struct ieee80211_frame_min);
4555 break;
4556 }
4557 } else
4558 hdrlen = ieee80211_hdrsize(wh);
4559
4560 if ((le16toh(desc->status) &
4561 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4562 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4563 // CCMP header length
4564 hdrlen += 8;
4565 }
4566
4567 memmove(m->m_data + 2, m->m_data, hdrlen);
4568 m_adj(m, 2);
4569
4570 }
4571
4572 if ((le16toh(desc->status) &
4573 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4574 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4575 pad = 1;
4576 }
4577
4578 // /*
4579 // * Hardware de-aggregates A-MSDUs and copies the same MAC header
4580 // * in place for each subframe. But it leaves the 'A-MSDU present'
4581 // * bit set in the frame header. We need to clear this bit ourselves.
4582 // * (XXX This workaround is not required on AX200/AX201 devices that
4583 // * have been tested by me, but it's unclear when this problem was
4584 // * fixed in the hardware. It definitely affects the 9k generation.
4585 // * Leaving this in place for now since some 9k/AX200 hybrids seem
4586 // * to exist that we may eventually add support for.)
4587 // *
4588 // * And we must allow the same CCMP PN for subframes following the
4589 // * first subframe. Otherwise they would be discarded as replays.
4590 // */
4591 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4592 DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
4593 // struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4594 // uint8_t subframe_idx = (desc->amsdu_info &
4595 // IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4596 // if (subframe_idx > 0)
4597 // rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4598 // if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4599 // m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4600 // struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4601 // struct ieee80211_qosframe_addr4 *);
4602 // qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4603 // } else if (ieee80211_has_qos(wh) &&
4604 // m->m_len >= sizeof(struct ieee80211_qosframe)) {
4605 // struct ieee80211_qosframe *qwh = mtod(m,
4606 // struct ieee80211_qosframe *);
4607 // qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4608 // }
4609 }
4610
4611 /*
4612 * Verify decryption before duplicate detection. The latter uses
4613 * the TID supplied in QoS frame headers and this TID is implicitly
4614 * verified as part of the CCMP nonce.
4615 */
4616 k = ieee80211_crypto_get_txkey(ni, m);
4617 if (k != NULL &&
4618 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4619 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4620 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4621 m_freem(m);
4622 return;
4623 }
4624
4625 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4626 rate_n_flags = le32toh(desc->v3.rate_n_flags);
4627 chanidx = desc->v3.channel;
4628 device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4629 } else {
4630 rate_n_flags = le32toh(desc->v1.rate_n_flags);
4631 chanidx = desc->v1.channel;
4632 device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4633 }
4634
4635 phy_info = le16toh(desc->phy_info);
4636
4637 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4638 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4639 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
4640
4641 memset(&rxs, 0, sizeof(rxs));
4642 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4643 rxs.r_flags |= IEEE80211_R_BAND;
4644 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4645 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4646
4647 rxs.c_ieee = chanidx;
4648 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4649 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4650 rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4651 rxs.c_rx_tsf = device_timestamp;
4652 rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
4653 if (rxs.c_chain != 0)
4654 rxs.r_flags |= IEEE80211_R_C_CHAIN;
4655
4656 /* rssi is in 1/2db units */
4657 rxs.c_rssi = rssi * 2;
4658 rxs.c_nf = sc->sc_noise;
4659
4660 if (pad) {
4661 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4662 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4663 }
4664
4665 if (ieee80211_add_rx_params(m, &rxs) == 0) {
4666 printf("%s: ieee80211_add_rx_params failed\n", __func__);
4667 return;
4668 }
4669
4670 ieee80211_add_rx_params(m, &rxs);
4671
4672 #if 0
4673 if (iwx_rx_reorder(sc, m, chanidx, desc,
4674 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4675 rate_n_flags, device_timestamp, &rxi, ml))
4676 return;
4677 #endif
4678
4679 if (pad) {
4680 #define TRIM 8
4681 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4682 hdrlen = ieee80211_hdrsize(wh);
4683 memmove(m->m_data + TRIM, m->m_data, hdrlen);
4684 m_adj(m, TRIM);
4685 #undef TRIM
4686 }
4687
4688 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4689 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4690 rate_n_flags, device_timestamp, rssi);
4691 }
4692
4693 static void
iwx_clear_tx_desc(struct iwx_softc * sc,struct iwx_tx_ring * ring,int idx)4694 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4695 {
4696 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4697 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4698 int i;
4699
4700 /* First TB is never cleared - it is bidirectional DMA data. */
4701 for (i = 1; i < num_tbs; i++) {
4702 struct iwx_tfh_tb *tb = &desc->tbs[i];
4703 memset(tb, 0, sizeof(*tb));
4704 }
4705 desc->num_tbs = htole16(1);
4706
4707 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4708 BUS_DMASYNC_PREWRITE);
4709 }
4710
4711 static void
iwx_txd_done(struct iwx_softc * sc,struct iwx_tx_ring * ring,struct iwx_tx_data * txd)4712 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
4713 struct iwx_tx_data *txd)
4714 {
4715 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
4716 bus_dmamap_unload(ring->data_dmat, txd->map);
4717
4718 ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
4719 txd->m = NULL;
4720 txd->in = NULL;
4721 }
4722
4723 static void
iwx_txq_advance(struct iwx_softc * sc,struct iwx_tx_ring * ring,uint16_t idx)4724 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
4725 {
4726 struct iwx_tx_data *txd;
4727
4728 while (ring->tail_hw != idx) {
4729 txd = &ring->data[ring->tail];
4730 if (txd->m != NULL) {
4731 iwx_clear_tx_desc(sc, ring, ring->tail);
4732 iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
4733 iwx_txd_done(sc, ring, txd);
4734 ring->queued--;
4735 if (ring->queued < 0)
4736 panic("caught negative queue count");
4737 }
4738 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4739 ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
4740 }
4741 }
4742
/*
 * Handle a TX command response: validate the notification, update
 * interface counters, and reclaim all frames up to the SSN reported by
 * the firmware.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Aggregation responses carry per-frame statuses plus the SSN. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* Firmware responded; stop the watchdog for this queue. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	/* The SSN follows the status array; copy out to avoid misalignment. */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4804
4805 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4806 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4807 {
4808 if (ring->queued < iwx_lomark) {
4809 sc->qfullmsk &= ~(1 << ring->qid);
4810 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4811 /*
4812 * Well, we're in interrupt context, but then again
4813 * I guess net80211 does all sorts of stunts in
4814 * interrupt context, so maybe this is no biggie.
4815 */
4816 iwx_start(sc);
4817 }
4818 }
4819 }
4820
4821 static void
iwx_rx_compressed_ba(struct iwx_softc * sc,struct iwx_rx_packet * pkt)4822 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4823 {
4824 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4825 struct ieee80211com *ic = &sc->sc_ic;
4826 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4827 struct iwx_node *in = IWX_NODE(vap->iv_bss);
4828 struct ieee80211_node *ni = &in->in_ni;
4829 struct iwx_tx_ring *ring;
4830 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4831 int qid;
4832
4833 // if (ic->ic_state != IEEE80211_S_RUN)
4834 // return;
4835
4836 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4837 return;
4838
4839 if (ba_res->sta_id != IWX_STATION_ID)
4840 return;
4841
4842 in = (void *)ni;
4843
4844 tfd_cnt = le16toh(ba_res->tfd_cnt);
4845 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4846 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4847 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4848 sizeof(ba_res->tfd[0]) * tfd_cnt))
4849 return;
4850
4851 for (i = 0; i < tfd_cnt; i++) {
4852 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4853 uint8_t tid;
4854
4855 tid = ba_tfd->tid;
4856 if (tid >= nitems(sc->aggqid))
4857 continue;
4858
4859 qid = sc->aggqid[tid];
4860 if (qid != htole16(ba_tfd->q_num))
4861 continue;
4862
4863 ring = &sc->txq[qid];
4864
4865 #if 0
4866 ba = &ni->ni_tx_ba[tid];
4867 if (ba->ba_state != IEEE80211_BA_AGREED)
4868 continue;
4869 #endif
4870 idx = le16toh(ba_tfd->tfd_index);
4871 sc->sc_tx_timer[qid] = 0;
4872 iwx_txq_advance(sc, ring, idx);
4873 iwx_clear_oactive(sc, ring);
4874 }
4875 }
4876
4877 static void
iwx_rx_bmiss(struct iwx_softc * sc,struct iwx_rx_packet * pkt,struct iwx_rx_data * data)4878 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4879 struct iwx_rx_data *data)
4880 {
4881 struct ieee80211com *ic = &sc->sc_ic;
4882 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4883 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4884 uint32_t missed;
4885
4886 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4887 (vap->iv_state != IEEE80211_S_RUN))
4888 return;
4889
4890 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4891 BUS_DMASYNC_POSTREAD);
4892
4893 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4894 if (missed > vap->iv_bmissthreshold) {
4895 ieee80211_beacon_miss(ic);
4896 }
4897
4898 }
4899
4900 static int
iwx_binding_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action)4901 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4902 {
4903 struct iwx_binding_cmd cmd;
4904 struct ieee80211com *ic = &sc->sc_ic;
4905 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4906 struct iwx_vap *ivp = IWX_VAP(vap);
4907 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
4908 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4909 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4910 uint32_t status;
4911
4912 if (action == IWX_FW_CTXT_ACTION_ADD && active)
4913 panic("binding already added");
4914 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4915 panic("binding already removed");
4916
4917 if (phyctxt == NULL) /* XXX race with iwx_stop() */
4918 return EINVAL;
4919
4920 memset(&cmd, 0, sizeof(cmd));
4921
4922 cmd.id_and_color
4923 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4924 cmd.action = htole32(action);
4925 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4926
4927 cmd.macs[0] = htole32(mac_id);
4928 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4929 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4930
4931 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4932 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4933 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4934 else
4935 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4936
4937 status = 0;
4938 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4939 &cmd, &status);
4940 if (err == 0 && status != 0)
4941 err = EIO;
4942
4943 return err;
4944 }
4945
4946 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4947 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4948 {
4949 int ctlchan = ieee80211_chan2ieee(ic, chan);
4950 int midpoint = chan->ic_vht_ch_freq1;
4951
4952 /*
4953 * The FW is expected to check the control channel position only
4954 * when in HT/VHT and the channel width is not 20MHz. Return
4955 * this value as the default one:
4956 */
4957 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4958
4959 switch (ctlchan - midpoint) {
4960 case -6:
4961 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
4962 break;
4963 case -2:
4964 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4965 break;
4966 case 2:
4967 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4968 break;
4969 case 6:
4970 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
4971 break;
4972 default:
4973 break;
4974 }
4975
4976 return pos;
4977 }
4978
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	/*
	 * Send a v3/v4 PHY context command using the "ultra high band"
	 * (UHB) layout with the larger fw_channel_info struct.  Band,
	 * width and control channel position are derived from
	 * ctxt->channel; the 'sco' and 'vht_chan_width' arguments are
	 * not used by this variant.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* Without CDB support all channels run on the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Select channel width and control channel position. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Older command versions carry the Rx chain configuration here;
	 * with RLC_CONFIG_CMD version 2 it is presumably configured via
	 * that command instead -- confirm against firmware documentation.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5032
#if 0
/*
 * Disabled: PHY context command for the smaller (non-UHB) fw_channel_info
 * layout.  Kept for reference only; iwx_phy_ctxt_cmd() currently panics on
 * hardware that would need this path.  See iwx_phy_ctxt_cmd_uhb_v3_v4()
 * for the variant actually in use.
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5094
5095 static int
iwx_phy_ctxt_cmd(struct iwx_softc * sc,struct iwx_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint32_t apply_time,uint8_t sco,uint8_t vht_chan_width)5096 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5097 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5098 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5099 {
5100 int cmdver;
5101
5102 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5103 if (cmdver != 3 && cmdver != 4) {
5104 printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5105 DEVNAME(sc));
5106 return ENOTSUP;
5107 }
5108
5109 /*
5110 * Intel increased the size of the fw_channel_info struct and neglected
5111 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5112 * member in the middle.
5113 * To keep things simple we use a separate function to handle the larger
5114 * variant of the phy context command.
5115 */
5116 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5117 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5118 chains_dynamic, action, sco, vht_chan_width, cmdver);
5119 } else
5120 panic("Unsupported old hardware contact thj@");
5121
5122 #if 0
5123 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5124 action, sco, vht_chan_width, cmdver);
5125 #endif
5126 }
5127
5128 static int
iwx_send_cmd(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5129 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5130 {
5131 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5132 struct iwx_tfh_tfd *desc;
5133 struct iwx_tx_data *txdata;
5134 struct iwx_device_cmd *cmd;
5135 struct mbuf *m;
5136 bus_addr_t paddr;
5137 uint64_t addr;
5138 int err = 0, i, paylen, off/*, s*/;
5139 int idx, code, async, group_id;
5140 size_t hdrlen, datasz;
5141 uint8_t *data;
5142 int generation = sc->sc_generation;
5143 bus_dma_segment_t seg[10];
5144 int nsegs;
5145
5146 code = hcmd->id;
5147 async = hcmd->flags & IWX_CMD_ASYNC;
5148 idx = ring->cur;
5149
5150 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5151 paylen += hcmd->len[i];
5152 }
5153
5154 /* If this command waits for a response, allocate response buffer. */
5155 hcmd->resp_pkt = NULL;
5156 if (hcmd->flags & IWX_CMD_WANT_RESP) {
5157 uint8_t *resp_buf;
5158 KASSERT(!async, ("async command want response"));
5159 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5160 ("wrong pkt len 1"));
5161 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5162 ("wrong pkt len 2"));
5163 if (sc->sc_cmd_resp_pkt[idx] != NULL)
5164 return ENOSPC;
5165 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5166 M_NOWAIT | M_ZERO);
5167 if (resp_buf == NULL)
5168 return ENOMEM;
5169 sc->sc_cmd_resp_pkt[idx] = resp_buf;
5170 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5171 } else {
5172 sc->sc_cmd_resp_pkt[idx] = NULL;
5173 }
5174
5175 desc = &ring->desc[idx];
5176 txdata = &ring->data[idx];
5177
5178 /*
5179 * XXX Intel inside (tm)
5180 * Firmware API versions >= 50 reject old-style commands in
5181 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5182 * that such commands were in the LONG_GROUP instead in order
5183 * for firmware to accept them.
5184 */
5185 if (iwx_cmd_groupid(code) == 0) {
5186 code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5187 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5188 } else
5189 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5190
5191 group_id = iwx_cmd_groupid(code);
5192
5193 hdrlen = sizeof(cmd->hdr_wide);
5194 datasz = sizeof(cmd->data_wide);
5195
5196 if (paylen > datasz) {
5197 /* Command is too large to fit in pre-allocated space. */
5198 size_t totlen = hdrlen + paylen;
5199 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5200 printf("%s: firmware command too long (%zd bytes)\n",
5201 DEVNAME(sc), totlen);
5202 err = EINVAL;
5203 goto out;
5204 }
5205 if (totlen > IWX_RBUF_SIZE)
5206 panic("totlen > IWX_RBUF_SIZE");
5207 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5208 if (m == NULL) {
5209 printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5210 DEVNAME(sc), IWX_RBUF_SIZE);
5211 err = ENOMEM;
5212 goto out;
5213 }
5214 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5215 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5216 seg, &nsegs, BUS_DMA_NOWAIT);
5217 if (nsegs > 20)
5218 panic("nsegs > 20");
5219 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5220 if (err) {
5221 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5222 DEVNAME(sc), totlen);
5223 m_freem(m);
5224 goto out;
5225 }
5226 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5227 cmd = mtod(m, struct iwx_device_cmd *);
5228 paddr = seg[0].ds_addr;
5229 } else {
5230 cmd = &ring->cmd[idx];
5231 paddr = txdata->cmd_paddr;
5232 }
5233
5234 memset(cmd, 0, sizeof(*cmd));
5235 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5236 cmd->hdr_wide.group_id = group_id;
5237 cmd->hdr_wide.qid = ring->qid;
5238 cmd->hdr_wide.idx = idx;
5239 cmd->hdr_wide.length = htole16(paylen);
5240 cmd->hdr_wide.version = iwx_cmd_version(code);
5241 data = cmd->data_wide;
5242
5243 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5244 if (hcmd->len[i] == 0)
5245 continue;
5246 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5247 off += hcmd->len[i];
5248 }
5249 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5250
5251 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5252 addr = htole64(paddr);
5253 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5254 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5255 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5256 paylen));
5257 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5258 IWX_FIRST_TB_SIZE);
5259 addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5260 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5261 desc->num_tbs = htole16(2);
5262 } else
5263 desc->num_tbs = htole16(1);
5264
5265 if (paylen > datasz) {
5266 bus_dmamap_sync(ring->data_dmat, txdata->map,
5267 BUS_DMASYNC_PREWRITE);
5268 } else {
5269 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5270 BUS_DMASYNC_PREWRITE);
5271 }
5272 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5273 BUS_DMASYNC_PREWRITE);
5274
5275 /* Kick command ring. */
5276 ring->queued++;
5277 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5278 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5279 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5280 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5281
5282 if (!async) {
5283 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5284 if (err == 0) {
5285 /* if hardware is no longer up, return error */
5286 if (generation != sc->sc_generation) {
5287 err = ENXIO;
5288 goto out;
5289 }
5290
5291 /* Response buffer will be freed in iwx_free_resp(). */
5292 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5293 sc->sc_cmd_resp_pkt[idx] = NULL;
5294 } else if (generation == sc->sc_generation) {
5295 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5296 sc->sc_cmd_resp_pkt[idx] = NULL;
5297 }
5298 }
5299 out:
5300 return err;
5301 }
5302
5303 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5304 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5305 uint16_t len, const void *data)
5306 {
5307 struct iwx_host_cmd cmd = {
5308 .id = id,
5309 .len = { len, },
5310 .data = { data, },
5311 .flags = flags,
5312 };
5313
5314 return iwx_send_cmd(sc, &cmd);
5315 }
5316
5317 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5318 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5319 uint32_t *status)
5320 {
5321 struct iwx_rx_packet *pkt;
5322 struct iwx_cmd_response *resp;
5323 int err, resp_len;
5324
5325 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5326 cmd->flags |= IWX_CMD_WANT_RESP;
5327 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5328
5329 err = iwx_send_cmd(sc, cmd);
5330 if (err)
5331 return err;
5332
5333 pkt = cmd->resp_pkt;
5334 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5335 return EIO;
5336
5337 resp_len = iwx_rx_packet_payload_len(pkt);
5338 if (resp_len != sizeof(*resp)) {
5339 iwx_free_resp(sc, cmd);
5340 return EIO;
5341 }
5342
5343 resp = (void *)pkt->data;
5344 *status = le32toh(resp->status);
5345 iwx_free_resp(sc, cmd);
5346 return err;
5347 }
5348
5349 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5350 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5351 const void *data, uint32_t *status)
5352 {
5353 struct iwx_host_cmd cmd = {
5354 .id = id,
5355 .len = { len, },
5356 .data = { data, },
5357 };
5358
5359 return iwx_send_cmd_status(sc, &cmd, status);
5360 }
5361
5362 static void
iwx_free_resp(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5363 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5364 {
5365 KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
5366 ("hcmd flags !IWX_CMD_WANT_RESP"));
5367 free(hcmd->resp_pkt, M_DEVBUF);
5368 hcmd->resp_pkt = NULL;
5369 }
5370
static void
iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
{
	/*
	 * Firmware acknowledged a host command: release any mbuf used for
	 * a large command in slot 'idx' and wake the thread sleeping in
	 * iwx_send_cmd() on the matching descriptor.
	 */
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tx_data *data;

	if (qid != IWX_DQA_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[idx];

	if (data->m != NULL) {
		/* Large commands travel in an mbuf; tear down its DMA map. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the msleep() in iwx_send_cmd() for this descriptor. */
	wakeup(&ring->desc[idx]);

	DPRINTF(("%s: command 0x%x done\n", __func__, code));
	if (ring->queued == 0) {
		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
		    DEVNAME(sc), code));
	} else if (ring->queued > 0)
		ring->queued--;
}
5399
5400 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5401 iwx_fw_rateidx_ofdm(uint8_t rval)
5402 {
5403 /* Firmware expects indices which match our 11a rate set. */
5404 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5405 int i;
5406
5407 for (i = 0; i < rs->rs_nrates; i++) {
5408 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5409 return i;
5410 }
5411
5412 return 0;
5413 }
5414
5415 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5416 iwx_fw_rateidx_cck(uint8_t rval)
5417 {
5418 /* Firmware expects indices which match our 11b rate set. */
5419 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5420 int i;
5421
5422 for (i = 0; i < rs->rs_nrates; i++) {
5423 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5424 return i;
5425 }
5426
5427 return 0;
5428 }
5429
5430 static int
iwx_min_basic_rate(struct ieee80211com * ic)5431 iwx_min_basic_rate(struct ieee80211com *ic)
5432 {
5433 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5434 struct ieee80211_node *ni = vap->iv_bss;
5435 struct ieee80211_rateset *rs = &ni->ni_rates;
5436 struct ieee80211_channel *c = ni->ni_chan;
5437 int i, min, rval;
5438
5439 min = -1;
5440
5441 if (c == IEEE80211_CHAN_ANYC) {
5442 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5443 return -1;
5444 }
5445
5446 for (i = 0; i < rs->rs_nrates; i++) {
5447 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5448 continue;
5449 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5450 if (min == -1)
5451 min = rval;
5452 else if (rval < min)
5453 min = rval;
5454 }
5455
5456 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5457 if (min == -1)
5458 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5459
5460 return min;
5461 }
5462
5463 /*
5464 * Determine the Tx command flags and Tx rate+flags to use.
5465 * Return the selected Tx rate.
5466 */
5467 static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc * sc,struct iwx_node * in,struct ieee80211_frame * wh,uint16_t * flags,uint32_t * rate_n_flags,struct mbuf * m)5468 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5469 struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5470 struct mbuf *m)
5471 {
5472 struct ieee80211com *ic = &sc->sc_ic;
5473 struct ieee80211_node *ni = &in->in_ni;
5474 struct ieee80211_rateset *rs = &ni->ni_rates;
5475 const struct iwx_rate *rinfo = NULL;
5476 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5477 int ridx = iwx_min_basic_rate(ic);
5478 int min_ridx, rate_flags;
5479 uint8_t rval;
5480
5481 /* We're in the process of clearing the node, no channel already */
5482 if (ridx == -1)
5483 return NULL;
5484
5485 min_ridx = iwx_rval2ridx(ridx);
5486
5487 *flags = 0;
5488
5489 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5490 type != IEEE80211_FC0_TYPE_DATA) {
5491 /* for non-data, use the lowest supported rate */
5492 ridx = min_ridx;
5493 *flags |= IWX_TX_FLAGS_CMD_RATE;
5494 } else if (ni->ni_flags & IEEE80211_NODE_HT) {
5495 ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5496 & ~IEEE80211_RATE_MCS];
5497 } else {
5498 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5499 & IEEE80211_RATE_VAL);
5500 ridx = iwx_rval2ridx(rval);
5501 if (ridx < min_ridx)
5502 ridx = min_ridx;
5503 }
5504
5505 if (m->m_flags & M_EAPOL)
5506 *flags |= IWX_TX_FLAGS_HIGH_PRI;
5507
5508 rinfo = &iwx_rates[ridx];
5509
5510 /*
5511 * Do not fill rate_n_flags if firmware controls the Tx rate.
5512 * For data frames we rely on Tx rate scaling in firmware by default.
5513 */
5514 if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5515 *rate_n_flags = 0;
5516 return rinfo;
5517 }
5518
5519 /*
5520 * Forcing a CCK/OFDM legacy rate is important for management frames.
5521 * Association will only succeed if we do this correctly.
5522 */
5523
5524 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5525 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5526 rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5527 if (IWX_RIDX_IS_CCK(ridx)) {
5528 if (sc->sc_rate_n_flags_version >= 2)
5529 rate_flags |= IWX_RATE_MCS_CCK_MSK;
5530 else
5531 rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5532 } else if (sc->sc_rate_n_flags_version >= 2)
5533 rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5534
5535 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5536 & IEEE80211_RATE_VAL);
5537 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5538 rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5539
5540 if (sc->sc_rate_n_flags_version >= 2) {
5541 if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5542 rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5543 IWX_RATE_LEGACY_RATE_MSK);
5544 } else {
5545 rate_flags |= (iwx_fw_rateidx_cck(rval) &
5546 IWX_RATE_LEGACY_RATE_MSK);
5547 }
5548 } else
5549 rate_flags |= rinfo->plcp;
5550
5551 *rate_n_flags = rate_flags;
5552 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5553 __func__, __LINE__,*flags);
5554 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5555 __func__, __LINE__, *rate_n_flags);
5556
5557 if (sc->sc_debug & IWX_DEBUG_TXRATE)
5558 print_ratenflags(__func__, __LINE__,
5559 *rate_n_flags, sc->sc_rate_n_flags_version);
5560
5561 return rinfo;
5562 }
5563
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	/*
	 * Update the byte-count table entry for TFD 'idx' so the hardware
	 * scheduler knows how much data to fetch from host memory.
	 */
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
	    num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Make the update visible to the device before the doorbell write. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
5599
5600 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5601 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5602 {
5603 struct ieee80211com *ic = &sc->sc_ic;
5604 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5605 struct iwx_node *in = (void *)ni;
5606 struct iwx_tx_ring *ring;
5607 struct iwx_tx_data *data;
5608 struct iwx_tfh_tfd *desc;
5609 struct iwx_device_cmd *cmd;
5610 struct ieee80211_frame *wh;
5611 struct ieee80211_key *k = NULL;
5612 const struct iwx_rate *rinfo;
5613 uint64_t paddr;
5614 u_int hdrlen;
5615 uint32_t rate_n_flags;
5616 uint16_t num_tbs, flags, offload_assist = 0;
5617 uint8_t type, subtype;
5618 int i, totlen, err, pad, qid;
5619 #define IWM_MAX_SCATTER 20
5620 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5621 int nsegs;
5622 struct mbuf *m1;
5623 size_t txcmd_size;
5624
5625 wh = mtod(m, struct ieee80211_frame *);
5626 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5627 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5628 hdrlen = ieee80211_anyhdrsize(wh);
5629
5630 qid = sc->first_data_qid;
5631
5632 /* Put QoS frames on the data queue which maps to their TID. */
5633 if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
5634 uint16_t qos = ieee80211_gettid(wh);
5635 uint8_t tid = qos & IEEE80211_QOS_TID;
5636 #if 0
5637 /*
5638 * XXX-THJ: TODO when we enable ba we need to manage the
5639 * mappings
5640 */
5641 struct ieee80211_tx_ba *ba;
5642 ba = &ni->ni_tx_ba[tid];
5643
5644 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5645 type == IEEE80211_FC0_TYPE_DATA &&
5646 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5647 subtype != IEEE80211_FC0_SUBTYPE_BAR &&
5648 sc->aggqid[tid] != 0 /*&&
5649 ba->ba_state == IEEE80211_BA_AGREED*/) {
5650 qid = sc->aggqid[tid];
5651 #else
5652 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5653 type == IEEE80211_FC0_TYPE_DATA &&
5654 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5655 sc->aggqid[tid] != 0) {
5656 qid = sc->aggqid[tid];
5657 #endif
5658 }
5659 }
5660
5661 ring = &sc->txq[qid];
5662 desc = &ring->desc[ring->cur];
5663 memset(desc, 0, sizeof(*desc));
5664 data = &ring->data[ring->cur];
5665
5666 cmd = &ring->cmd[ring->cur];
5667 cmd->hdr.code = IWX_TX_CMD;
5668 cmd->hdr.flags = 0;
5669 cmd->hdr.qid = ring->qid;
5670 cmd->hdr.idx = ring->cur;
5671
5672 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5673 if (rinfo == NULL)
5674 return EINVAL;
5675
5676 /* Offloaded sequence number assignment */
5677 /* Note: Should be done in firmware on all supported devices */
5678
5679 /* Radiotap */
5680 if (ieee80211_radiotap_active_vap(vap)) {
5681 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5682
5683 tap->wt_flags = 0;
5684 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5685 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5686 tap->wt_rate = rinfo->rate;
5687 if (k != NULL)
5688 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5689 ieee80211_radiotap_tx(vap, m);
5690 }
5691
5692 /* Encrypt - CCMP via direct HW path, TKIP/WEP indirected openbsd-style for now */
5693 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5694 k = ieee80211_crypto_get_txkey(ni, m);
5695 if (k == NULL) {
5696 printf("%s: k is NULL!\n", __func__);
5697 m_freem(m);
5698 return (ENOBUFS);
5699 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5700 k->wk_keytsc++;
5701 } else {
5702 k->wk_cipher->ic_encap(k, m);
5703
5704 /* 802.11 headers may have moved */
5705 wh = mtod(m, struct ieee80211_frame *);
5706 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5707 }
5708 } else
5709 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5710
5711 totlen = m->m_pkthdr.len;
5712
5713 if (hdrlen & 3) {
5714 /* First segment length must be a multiple of 4. */
5715 pad = 4 - (hdrlen & 3);
5716 offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5717 } else
5718 pad = 0;
5719
5720 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5721 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5722 memset(tx, 0, sizeof(*tx));
5723 tx->len = htole16(totlen);
5724 tx->offload_assist = htole32(offload_assist);
5725 tx->flags = htole16(flags);
5726 tx->rate_n_flags = htole32(rate_n_flags);
5727 memcpy(tx->hdr, wh, hdrlen);
5728 txcmd_size = sizeof(*tx);
5729 } else {
5730 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5731 memset(tx, 0, sizeof(*tx));
5732 tx->len = htole16(totlen);
5733 tx->offload_assist = htole16(offload_assist);
5734 tx->flags = htole32(flags);
5735 tx->rate_n_flags = htole32(rate_n_flags);
5736 memcpy(tx->hdr, wh, hdrlen);
5737 txcmd_size = sizeof(*tx);
5738 }
5739
5740 /* Trim 802.11 header. */
5741 m_adj(m, hdrlen);
5742
5743 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5744 &nsegs, BUS_DMA_NOWAIT);
5745 if (err && err != EFBIG) {
5746 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5747 m_freem(m);
5748 return err;
5749 }
5750 if (err) {
5751 /* Too many DMA segments, linearize mbuf. */
5752 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5753 if (m1 == NULL) {
5754 printf("%s: could not defrag mbufs\n", __func__);
5755 m_freem(m);
5756 return (ENOBUFS);
5757 }
5758 m = m1;
5759 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5760 segs, &nsegs, BUS_DMA_NOWAIT);
5761 if (err) {
5762 printf("%s: can't map mbuf (error %d)\n", __func__,
5763 err);
5764 m_freem(m);
5765 return (err);
5766 }
5767 }
5768 data->m = m;
5769 data->in = in;
5770
5771 /* Fill TX descriptor. */
5772 num_tbs = 2 + nsegs;
5773 desc->num_tbs = htole16(num_tbs);
5774
5775 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5776 paddr = htole64(data->cmd_paddr);
5777 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5778 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5779 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5780 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5781 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5782 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5783 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5784
5785 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5786 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5787
5788 /* Other DMA segments are for data payload. */
5789 for (i = 0; i < nsegs; i++) {
5790 seg = &segs[i];
5791 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5792 paddr = htole64(seg->ds_addr);
5793 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5794 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5795 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5796 }
5797
5798 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5799 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5800 BUS_DMASYNC_PREWRITE);
5801 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5802 BUS_DMASYNC_PREWRITE);
5803
5804 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5805
5806 /* Kick TX ring. */
5807 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5808 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5809 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5810
5811 /* Mark TX ring as full if we reach a certain threshold. */
5812 if (++ring->queued > iwx_himark) {
5813 sc->qfullmsk |= 1 << ring->qid;
5814 }
5815
5816 sc->sc_tx_timer[ring->qid] = 15;
5817
5818 return 0;
5819 }
5820
/*
 * Flush any frames still pending in firmware for the given station and
 * TID set, then advance our local Tx ring read pointers to match the
 * firmware's post-flush state.
 *
 * Returns 0 on success, EIO if the firmware response is missing,
 * malformed, or refers to a different station, or the error from
 * iwx_send_cmd().
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* Sanity-check that the response is for the station we flushed. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/*
	 * For each queue the firmware reports as flushed, advance our ring
	 * state to the firmware's new read pointer so the flushed frames
	 * are reclaimed on our side as well.
	 */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		/* Ignore queue numbers we don't track. */
		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
5891
5892 #define IWX_FLUSH_WAIT_MS 2000
5893
5894 static int
5895 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5896 {
5897 struct iwx_add_sta_cmd cmd;
5898 int err;
5899 uint32_t status;
5900
5901 memset(&cmd, 0, sizeof(cmd));
5902 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5903 in->in_color));
5904 cmd.sta_id = IWX_STATION_ID;
5905 cmd.add_modify = IWX_STA_MODE_MODIFY;
5906 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5907 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5908
5909 status = IWX_ADD_STA_SUCCESS;
5910 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5911 sizeof(cmd), &cmd, &status);
5912 if (err) {
5913 printf("%s: could not update sta (error %d)\n",
5914 DEVNAME(sc), err);
5915 return err;
5916 }
5917
5918 switch (status & IWX_ADD_STA_STATUS_MASK) {
5919 case IWX_ADD_STA_SUCCESS:
5920 break;
5921 default:
5922 err = EIO;
5923 printf("%s: Couldn't %s draining for station\n",
5924 DEVNAME(sc), drain ? "enable" : "disable");
5925 break;
5926 }
5927
5928 return err;
5929 }
5930
/*
 * Flush all pending Tx frames for the given station: put the station
 * into drain mode, flush every TID in firmware, then take the station
 * out of drain mode again.  IWX_FLAG_TXFLUSH is held across the whole
 * sequence so the Tx path knows a flush is in progress.
 */
static int
iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err;

	IWX_ASSERT_LOCKED(sc);

	sc->sc_flags |= IWX_FLAG_TXFLUSH;

	err = iwx_drain_sta(sc, in, 1);
	if (err)
		goto done;

	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		goto done;
	}

	/*
	 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a no-op
	 * in the FreeBSD driver and has been replaced in OpenBSD.
	 */

	err = iwx_drain_sta(sc, in, 0);
done:
	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
	return err;
}
5961
5962 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
5963
5964 static int
5965 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5966 struct iwx_beacon_filter_cmd *cmd)
5967 {
5968 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5969 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5970 }
5971
5972 static int
5973 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5974 {
5975 struct iwx_beacon_filter_cmd cmd = {
5976 IWX_BF_CMD_CONFIG_DEFAULTS,
5977 .bf_enable_beacon_filter = htole32(1),
5978 .ba_enable_beacon_abort = htole32(enable),
5979 };
5980
5981 if (!sc->sc_bf.bf_enabled)
5982 return 0;
5983
5984 sc->sc_bf.ba_enabled = enable;
5985 return iwx_beacon_filter_send_cmd(sc, &cmd);
5986 }
5987
5988 static void
5989 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5990 struct iwx_mac_power_cmd *cmd)
5991 {
5992 struct ieee80211com *ic = &sc->sc_ic;
5993 struct ieee80211_node *ni = &in->in_ni;
5994 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5995 int dtim_period, dtim_msec, keep_alive;
5996
5997 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5998 in->in_color));
5999 if (vap->iv_dtim_period)
6000 dtim_period = vap->iv_dtim_period;
6001 else
6002 dtim_period = 1;
6003
6004 /*
6005 * Regardless of power management state the driver must set
6006 * keep alive period. FW will use it for sending keep alive NDPs
6007 * immediately after association. Check that keep alive period
6008 * is at least 3 * DTIM.
6009 */
6010 dtim_msec = dtim_period * ni->ni_intval;
6011 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6012 keep_alive = roundup(keep_alive, 1000) / 1000;
6013 cmd->keep_alive_seconds = htole16(keep_alive);
6014
6015 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6016 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6017 }
6018
6019 static int
6020 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6021 {
6022 int err;
6023 int ba_enable;
6024 struct iwx_mac_power_cmd cmd;
6025
6026 memset(&cmd, 0, sizeof(cmd));
6027
6028 iwx_power_build_cmd(sc, in, &cmd);
6029
6030 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6031 sizeof(cmd), &cmd);
6032 if (err != 0)
6033 return err;
6034
6035 ba_enable = !!(cmd.flags &
6036 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6037 return iwx_update_beacon_abort(sc, in, ba_enable);
6038 }
6039
6040 static int
6041 iwx_power_update_device(struct iwx_softc *sc)
6042 {
6043 struct iwx_device_power_cmd cmd = { };
6044 struct ieee80211com *ic = &sc->sc_ic;
6045
6046 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6047 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6048
6049 return iwx_send_cmd_pdu(sc,
6050 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6051 }
6052 #if 0
/*
 * Enable beacon filtering in the firmware and record the new state in
 * sc_bf.  Currently compiled out via the surrounding #if 0.
 * NOTE(review): the 'in' parameter is unused here.
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
6069 #endif
6070 static int
6071 iwx_disable_beacon_filter(struct iwx_softc *sc)
6072 {
6073 struct iwx_beacon_filter_cmd cmd;
6074 int err;
6075
6076 memset(&cmd, 0, sizeof(cmd));
6077
6078 err = iwx_beacon_filter_send_cmd(sc, &cmd);
6079 if (err == 0)
6080 sc->sc_bf.bf_enabled = 0;
6081
6082 return err;
6083 }
6084
/*
 * Add (update == 0) or modify (update != 0) the firmware's station
 * table entry for our peer.  In monitor mode a general-purpose
 * "sniffer" station is used instead of a link station.  The HT/VHT
 * branch derives MIMO, channel-width, A-MPDU size and MPDU density
 * flags from the negotiated node capabilities.
 *
 * Returns 0 on success, EIO if the firmware rejected the command, or
 * the error from iwx_send_cmd_pdu_status().
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only supplied when the station is created. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				/*
				 * For plain HT, only enable MIMO if the
				 * peer advertises an MCS above the
				 * single-stream set (MCS 8+).
				 */
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Use the A-MPDU size the peer advertised in HT caps. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Translate the peer's minimum MPDU start spacing. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6205
6206 static int
6207 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6208 {
6209 struct ieee80211com *ic = &sc->sc_ic;
6210 struct iwx_rm_sta_cmd rm_sta_cmd;
6211 int err;
6212
6213 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6214 panic("sta already removed");
6215
6216 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6217 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6218 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6219 else
6220 rm_sta_cmd.sta_id = IWX_STATION_ID;
6221
6222 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6223 &rm_sta_cmd);
6224
6225 return err;
6226 }
6227
/*
 * Tear down our station: flush its pending Tx frames, explicitly
 * disable its Tx queues when the firmware's SCD_QUEUE_CONFIG API
 * requires it, remove the station from the firmware, and finally reset
 * all driver-side aggregation/BA state tied to it.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Disable every aggregation queue that is still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all driver-side block-ack and aggregation bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6300
6301 static uint8_t
6302 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6303 struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6304 int n_ssids, uint32_t channel_cfg_flags)
6305 {
6306 struct ieee80211com *ic = &sc->sc_ic;
6307 struct ieee80211_scan_state *ss = ic->ic_scan;
6308 struct ieee80211_channel *c;
6309 uint8_t nchan;
6310 int j;
6311
6312 for (nchan = j = 0;
6313 j < ss->ss_last &&
6314 nchan < sc->sc_capa_n_scan_channels;
6315 j++) {
6316 uint8_t channel_num;
6317
6318 c = ss->ss_chans[j];
6319 channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6320 if (isset(sc->sc_ucode_api,
6321 IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6322 chan->v2.channel_num = channel_num;
6323 if (IEEE80211_IS_CHAN_2GHZ(c))
6324 chan->v2.band = IWX_PHY_BAND_24;
6325 else
6326 chan->v2.band = IWX_PHY_BAND_5;
6327 chan->v2.iter_count = 1;
6328 chan->v2.iter_interval = 0;
6329 } else {
6330 chan->v1.channel_num = channel_num;
6331 chan->v1.iter_count = 1;
6332 chan->v1.iter_interval = htole16(0);
6333 }
6334 chan->flags |= htole32(channel_cfg_flags);
6335 chan++;
6336 nchan++;
6337 }
6338
6339 return nchan;
6340 }
6341
6342 static int
6343 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6344 {
6345 struct ieee80211com *ic = &sc->sc_ic;
6346 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6347 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6348 struct ieee80211_rateset *rs;
6349 size_t remain = sizeof(preq->buf);
6350 uint8_t *frm, *pos;
6351
6352 memset(preq, 0, sizeof(*preq));
6353
6354 if (remain < sizeof(*wh) + 2)
6355 return ENOBUFS;
6356
6357 /*
6358 * Build a probe request frame. Most of the following code is a
6359 * copy & paste of what is done in net80211.
6360 */
6361 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6362 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6363 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6364 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6365 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6366 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6367 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6368 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6369
6370 frm = (uint8_t *)(wh + 1);
6371 *frm++ = IEEE80211_ELEMID_SSID;
6372 *frm++ = 0;
6373 /* hardware inserts SSID */
6374
6375 /* Tell the firmware where the MAC header is. */
6376 preq->mac_header.offset = 0;
6377 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6378 remain -= frm - (uint8_t *)wh;
6379
6380 /* Fill in 2GHz IEs and tell firmware where they are. */
6381 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6382 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6383 if (remain < 4 + rs->rs_nrates)
6384 return ENOBUFS;
6385 } else if (remain < 2 + rs->rs_nrates)
6386 return ENOBUFS;
6387 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6388 pos = frm;
6389 frm = ieee80211_add_rates(frm, rs);
6390 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6391 frm = ieee80211_add_xrates(frm, rs);
6392 remain -= frm - pos;
6393
6394 if (isset(sc->sc_enabled_capa,
6395 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6396 if (remain < 3)
6397 return ENOBUFS;
6398 *frm++ = IEEE80211_ELEMID_DSPARMS;
6399 *frm++ = 1;
6400 *frm++ = 0;
6401 remain -= 3;
6402 }
6403 preq->band_data[0].len = htole16(frm - pos);
6404
6405 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6406 /* Fill in 5GHz IEs. */
6407 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6408 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6409 if (remain < 4 + rs->rs_nrates)
6410 return ENOBUFS;
6411 } else if (remain < 2 + rs->rs_nrates)
6412 return ENOBUFS;
6413 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6414 pos = frm;
6415 frm = ieee80211_add_rates(frm, rs);
6416 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6417 frm = ieee80211_add_xrates(frm, rs);
6418 preq->band_data[1].len = htole16(frm - pos);
6419 remain -= frm - pos;
6420 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6421 if (remain < 14)
6422 return ENOBUFS;
6423 frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6424 remain -= frm - pos;
6425 preq->band_data[1].len = htole16(frm - pos);
6426 }
6427 }
6428
6429 /* Send 11n IEs on both 2GHz and 5GHz bands. */
6430 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6431 pos = frm;
6432 if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6433 if (remain < 28)
6434 return ENOBUFS;
6435 frm = ieee80211_add_htcap(frm, vap->iv_bss);
6436 /* XXX add WME info? */
6437 remain -= frm - pos;
6438 }
6439
6440 preq->common_data.len = htole16(frm - pos);
6441
6442 return 0;
6443 }
6444
6445 static int
6446 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6447 {
6448 struct iwx_scan_config scan_cfg;
6449 struct iwx_host_cmd hcmd = {
6450 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6451 .len[0] = sizeof(scan_cfg),
6452 .data[0] = &scan_cfg,
6453 .flags = 0,
6454 };
6455 int cmdver;
6456
6457 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6458 printf("%s: firmware does not support reduced scan config\n",
6459 DEVNAME(sc));
6460 return ENOTSUP;
6461 }
6462
6463 memset(&scan_cfg, 0, sizeof(scan_cfg));
6464
6465 /*
6466 * SCAN_CFG version >= 5 implies that the broadcast
6467 * STA ID field is deprecated.
6468 */
6469 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6470 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6471 scan_cfg.bcast_sta_id = 0xff;
6472
6473 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6474 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6475
6476 return iwx_send_cmd(sc, &hcmd);
6477 }
6478
6479 static uint16_t
6480 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6481 {
6482 struct ieee80211com *ic = &sc->sc_ic;
6483 struct ieee80211_scan_state *ss = ic->ic_scan;
6484 uint16_t flags = 0;
6485
6486 if (ss->ss_nssid == 0) {
6487 DPRINTF(("%s: Passive scan started\n", __func__));
6488 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6489 }
6490
6491 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6492 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6493 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6494
6495 return flags;
6496 }
6497
6498 #define IWX_SCAN_DWELL_ACTIVE 10
6499 #define IWX_SCAN_DWELL_PASSIVE 110
6500
6501 /* adaptive dwell max budget time [TU] for full scan */
6502 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6503 /* adaptive dwell max budget time [TU] for directed scan */
6504 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6505 /* adaptive dwell default high band APs number */
6506 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6507 /* adaptive dwell default low band APs number */
6508 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6509 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6510 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6511 /* adaptive dwell number of APs override for p2p friendly GO channels */
6512 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6513 /* adaptive dwell number of APs override for social channels */
6514 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6515
6516 static void
6517 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6518 struct iwx_scan_general_params_v10 *general_params, int bgscan)
6519 {
6520 uint32_t suspend_time, max_out_time;
6521 uint8_t active_dwell, passive_dwell;
6522
6523 active_dwell = IWX_SCAN_DWELL_ACTIVE;
6524 passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6525
6526 general_params->adwell_default_social_chn =
6527 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6528 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6529 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6530
6531 if (bgscan)
6532 general_params->adwell_max_budget =
6533 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6534 else
6535 general_params->adwell_max_budget =
6536 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6537
6538 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6539 if (bgscan) {
6540 max_out_time = htole32(120);
6541 suspend_time = htole32(120);
6542 } else {
6543 max_out_time = htole32(0);
6544 suspend_time = htole32(0);
6545 }
6546 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6547 htole32(max_out_time);
6548 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6549 htole32(suspend_time);
6550 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6551 htole32(max_out_time);
6552 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6553 htole32(suspend_time);
6554
6555 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6556 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6557 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6558 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6559 }
6560
6561 static void
6562 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6563 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6564 {
6565 iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6566
6567 gp->flags = htole16(gen_flags);
6568
6569 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6570 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6571 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6572 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6573
6574 gp->scan_start_mac_id = 0;
6575 }
6576
6577 static void
6578 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6579 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6580 int n_ssid)
6581 {
6582 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6583
6584 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6585 nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6586
6587 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6588 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6589 }
6590
/*
 * Build and send a v14 UMAC scan request from the current net80211
 * scan state.  Background scans are sent asynchronously; a directed
 * SSID list (if any) is copied into the probe parameters and encoded
 * as a bitmap in the channel config flags.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	/* The command buffer lives in the softc; rewritten per scan. */
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* Single scan iteration, no periodic rescheduling. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/* Copy the directed-scan SSIDs and build their selection bitmap. */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6653
6654 static void
6655 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6656 {
6657 char alpha2[3];
6658
6659 snprintf(alpha2, sizeof(alpha2), "%c%c",
6660 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6661
6662 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6663 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6664
6665 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6666 }
6667
6668 uint8_t
6669 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6670 {
6671 int i;
6672 uint8_t rval;
6673
6674 for (i = 0; i < rs->rs_nrates; i++) {
6675 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6676 if (rval == iwx_rates[ridx].rate)
6677 return rs->rs_rates[i];
6678 }
6679
6680 return 0;
6681 }
6682
6683 static int
6684 iwx_rval2ridx(int rval)
6685 {
6686 int ridx;
6687
6688 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6689 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6690 continue;
6691 if (rval == iwx_rates[ridx].rate)
6692 break;
6693 }
6694
6695 return ridx;
6696 }
6697
/*
 * Compute the CCK and OFDM basic-rate ("ACK rate") bitmaps for the
 * firmware MAC context from the BSS's basic rate set, then extend them
 * with the mandatory lower rates required by 802.11-2007 9.6 (see the
 * long comment below).
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6783
6784 static void
6785 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6786 struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6787 {
6788 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6789 struct ieee80211com *ic = &sc->sc_ic;
6790 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6791 struct ieee80211_node *ni = vap->iv_bss;
6792 int cck_ack_rates, ofdm_ack_rates;
6793
6794 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6795 in->in_color));
6796 cmd->action = htole32(action);
6797
6798 if (action == IWX_FW_CTXT_ACTION_REMOVE)
6799 return;
6800
6801 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6802 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6803 else if (ic->ic_opmode == IEEE80211_M_STA)
6804 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6805 else
6806 panic("unsupported operating mode %d", ic->ic_opmode);
6807 cmd->tsf_id = htole32(IWX_TSF_ID_A);
6808
6809 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6810 DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6811 ether_sprintf(cmd->node_addr)));
6812 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6813 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6814 return;
6815 }
6816
6817 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6818 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6819 ether_sprintf(cmd->bssid_addr)));
6820 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6821 cmd->cck_rates = htole32(cck_ack_rates);
6822 cmd->ofdm_rates = htole32(ofdm_ack_rates);
6823
6824 cmd->cck_short_preamble
6825 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6826 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6827 cmd->short_slot
6828 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6829 ? IWX_MAC_FLG_SHORT_SLOT : 0);
6830
6831 struct chanAccParams chp;
6832 ieee80211_wme_vap_getparams(vap, &chp);
6833
6834 for (int i = 0; i < WME_NUM_AC; i++) {
6835 int txf = iwx_ac_to_tx_fifo[i];
6836 cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6837 cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6838 cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6839 cmd->ac[txf].fifos_mask = (1 << txf);
6840 cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6841
6842 cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6843 }
6844
6845 if (ni->ni_flags & IEEE80211_NODE_QOS) {
6846 DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6847 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6848 }
6849
6850 if (ni->ni_flags & IEEE80211_NODE_HT) {
6851 switch (vap->iv_curhtprotmode) {
6852 case IEEE80211_HTINFO_OPMODE_PURE:
6853 break;
6854 case IEEE80211_HTINFO_OPMODE_PROTOPT:
6855 case IEEE80211_HTINFO_OPMODE_MIXED:
6856 cmd->protection_flags |=
6857 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6858 IWX_MAC_PROT_FLG_FAT_PROT);
6859 break;
6860 case IEEE80211_HTINFO_OPMODE_HT20PR:
6861 if (in->in_phyctxt &&
6862 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6863 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6864 cmd->protection_flags |=
6865 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6866 IWX_MAC_PROT_FLG_FAT_PROT);
6867 }
6868 break;
6869 default:
6870 break;
6871 }
6872 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6873 DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6874 }
6875
6876 if (ic->ic_flags & IEEE80211_F_USEPROT)
6877 cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6878 cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6879 #undef IWX_EXP2
6880 }
6881
6882 static void
6883 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6884 struct iwx_mac_data_sta *sta, int assoc)
6885 {
6886 struct ieee80211_node *ni = &in->in_ni;
6887 struct ieee80211com *ic = &sc->sc_ic;
6888 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6889 uint32_t dtim_off;
6890 uint64_t tsf;
6891 int dtim_period;
6892
6893 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6894 tsf = le64toh(ni->ni_tstamp.tsf);
6895 dtim_period = vap->iv_dtim_period;
6896
6897 sta->is_assoc = htole32(assoc);
6898
6899 if (assoc) {
6900 sta->dtim_time = htole32(tsf + dtim_off);
6901 sta->dtim_tsf = htole64(tsf + dtim_off);
6902 // XXX: unset in iwm
6903 sta->assoc_beacon_arrive_time = 0;
6904 }
6905 sta->bi = htole32(ni->ni_intval);
6906 sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6907 sta->data_policy = htole32(0);
6908 sta->listen_interval = htole32(10);
6909 sta->assoc_id = htole32(ni->ni_associd);
6910 }
6911
6912 static int
6913 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6914 int assoc)
6915 {
6916 struct ieee80211com *ic = &sc->sc_ic;
6917 struct ieee80211_node *ni = &in->in_ni;
6918 struct iwx_mac_ctx_cmd cmd;
6919 int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6920
6921 if (action == IWX_FW_CTXT_ACTION_ADD && active)
6922 panic("MAC already added");
6923 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6924 panic("MAC already removed");
6925
6926 memset(&cmd, 0, sizeof(cmd));
6927
6928 iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6929
6930 if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6931 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6932 sizeof(cmd), &cmd);
6933 }
6934
6935 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6936 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6937 IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6938 IWX_MAC_FILTER_ACCEPT_GRP |
6939 IWX_MAC_FILTER_IN_BEACON |
6940 IWX_MAC_FILTER_IN_PROBE_REQUEST |
6941 IWX_MAC_FILTER_IN_CRC32);
6942 // XXX: dtim period is in vap
6943 } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
6944 /*
6945 * Allow beacons to pass through as long as we are not
6946 * associated or we do not have dtim period information.
6947 */
6948 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6949 }
6950 iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6951 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6952 }
6953
6954 static int
6955 iwx_clear_statistics(struct iwx_softc *sc)
6956 {
6957 struct iwx_statistics_cmd scmd = {
6958 .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6959 };
6960 struct iwx_host_cmd cmd = {
6961 .id = IWX_STATISTICS_CMD,
6962 .len[0] = sizeof(scmd),
6963 .data[0] = &scmd,
6964 .flags = IWX_CMD_WANT_RESP,
6965 .resp_pkt_len = sizeof(struct iwx_notif_statistics),
6966 };
6967 int err;
6968
6969 err = iwx_send_cmd(sc, &cmd);
6970 if (err)
6971 return err;
6972
6973 iwx_free_resp(sc, &cmd);
6974 return 0;
6975 }
6976
/* Start a foreground UMAC scan. Returns 0 or an error from the firmware. */
static int
iwx_scan(struct iwx_softc *sc)
{
	int ret;

	ret = iwx_umac_scan_v14(sc, 0);
	if (ret != 0)
		printf("%s: could not initiate scan\n", DEVNAME(sc));
	return ret;
}
6989
6990 static int
6991 iwx_bgscan(struct ieee80211com *ic)
6992 {
6993 struct iwx_softc *sc = ic->ic_softc;
6994 int err;
6995
6996 err = iwx_umac_scan_v14(sc, 1);
6997 if (err) {
6998 printf("%s: could not initiate scan\n", DEVNAME(sc));
6999 return err;
7000 }
7001 return 0;
7002 }
7003
7004 static int
7005 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7006 {
7007 int err;
7008
7009 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7010
7011 /*
7012 * Non-QoS frames use the "MGMT" TID and queue.
7013 * Other TIDs and data queues are reserved for QoS data frames.
7014 */
7015 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7016 IWX_MGMT_TID, IWX_TX_RING_COUNT);
7017 if (err) {
7018 printf("%s: could not enable Tx queue %d (error %d)\n",
7019 DEVNAME(sc), sc->first_data_qid, err);
7020 return err;
7021 }
7022
7023 return 0;
7024 }
7025
7026 static int
7027 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7028 {
7029 int err, cmd_ver;
7030
7031 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7032 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7033 IWX_SCD_QUEUE_CONFIG_CMD);
7034 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7035 return 0;
7036
7037 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7038
7039 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7040 IWX_MGMT_TID);
7041 if (err) {
7042 printf("%s: could not disable Tx queue %d (error %d)\n",
7043 DEVNAME(sc), sc->first_data_qid, err);
7044 return err;
7045 }
7046
7047 return 0;
7048 }
7049
7050 static int
7051 iwx_rs_rval2idx(uint8_t rval)
7052 {
7053 /* Firmware expects indices which match our 11g rate set. */
7054 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7055 int i;
7056
7057 for (i = 0; i < rs->rs_nrates; i++) {
7058 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7059 return i;
7060 }
7061
7062 return -1;
7063 }
7064
7065 static uint16_t
7066 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7067 {
7068 uint16_t htrates = 0;
7069 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7070 int i;
7071
7072 if (rsidx == IEEE80211_HT_RATESET_SISO) {
7073 for (i = 0; i < htrs->rs_nrates; i++) {
7074 if (htrs->rs_rates[i] <= 7)
7075 htrates |= (1 << htrs->rs_rates[i]);
7076 }
7077 } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7078 for (i = 0; i < htrs->rs_nrates; i++) {
7079 if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7080 htrates |= (1 << (htrs->rs_rates[i] - 8));
7081 }
7082 } else
7083 panic(("iwx_rs_ht_rates"));
7084
7085 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7086 "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7087
7088 return htrates;
7089 }
7090
7091 uint16_t
7092 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7093 {
7094 uint16_t rx_mcs;
7095 int max_mcs = -1;
7096 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
7097 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
7098 rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7099 IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7100 IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7101
7102 switch (rx_mcs) {
7103 case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7104 break;
7105 case IEEE80211_VHT_MCS_SUPPORT_0_7:
7106 max_mcs = 7;
7107 break;
7108 case IEEE80211_VHT_MCS_SUPPORT_0_8:
7109 max_mcs = 8;
7110 break;
7111 case IEEE80211_VHT_MCS_SUPPORT_0_9:
7112 /* Disable VHT MCS 9 for 20MHz-only stations. */
7113 if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7114 max_mcs = 8;
7115 else
7116 max_mcs = 9;
7117 break;
7118 default:
7119 /* Should not happen; Values above cover the possible range. */
7120 panic("invalid VHT Rx MCS value %u", rx_mcs);
7121 }
7122
7123 return ((1 << (max_mcs + 1)) - 1);
7124 }
7125
/*
 * Configure firmware-based rate scaling with version 3 of the
 * TLC_MNG_CONFIG command.
 *
 * This path has not been validated on this port, so it currently panics
 * unconditionally; the ported implementation is kept under #else for
 * reference until the v3 command path can be tested on real hardware.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Map the negotiated legacy rates onto standard 11g rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick HT/VHT/legacy mode and fill the per-NSS MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Maximum channel width supported by the peer's PHY context. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU sizes per 802.11 (VHT vs. HT capability). */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7197
/*
 * Configure firmware-based rate scaling (TLC) with version 4 of the
 * TLC_MNG_CONFIG command: legacy rate bitmap, HT/VHT MCS bitmaps,
 * channel width, chain mask, maximum MPDU length and SGI support.
 *
 * Returns 0 on success, EINVAL if a negotiated legacy rate cannot be
 * mapped to a firmware rate index, or an error from the command send.
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Map the negotiated legacy rates onto standard 11g rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick HT/VHT/legacy mode and fill the per-NSS MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	/* OpenBSD derives the width from the PHY context; kept for reference. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Derive the maximum channel width from the node's channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU sizes per 802.11 (VHT vs. HT capability). */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7296
7297 static int
7298 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7299 {
7300 int cmd_ver;
7301
7302 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7303 IWX_TLC_MNG_CONFIG_CMD);
7304 if (cmd_ver == 4)
7305 return iwx_rs_init_v4(sc, in);
7306 else
7307 return iwx_rs_init_v3(sc, in);
7308 }
7309
7310 static void
7311 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7312 {
7313 struct ieee80211com *ic = &sc->sc_ic;
7314 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7315 struct ieee80211_node *ni = (void *)vap->iv_bss;
7316
7317 struct ieee80211_rateset *rs = &ni->ni_rates;
7318 uint32_t rate_n_flags;
7319 uint8_t plcp, rval;
7320 int i, cmd_ver, rate_n_flags_ver2 = 0;
7321
7322 if (notif->sta_id != IWX_STATION_ID ||
7323 (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7324 return;
7325
7326 rate_n_flags = le32toh(notif->rate);
7327
7328 if (sc->sc_debug & IWX_DEBUG_TXRATE)
7329 print_ratenflags(__func__, __LINE__,
7330 rate_n_flags, sc->sc_rate_n_flags_version);
7331
7332 cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
7333 IWX_TLC_MNG_UPDATE_NOTIF);
7334 if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
7335 rate_n_flags_ver2 = 1;
7336
7337 if (rate_n_flags_ver2) {
7338 uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7339 if (mod_type == IWX_RATE_MCS_HT_MSK) {
7340
7341 ieee80211_node_set_txrate_dot11rate(ni,
7342 IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
7343 IEEE80211_RATE_MCS);
7344 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7345 "%s:%d new MCS: %d rate_n_flags: %x\n",
7346 __func__, __LINE__,
7347 ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
7348 rate_n_flags);
7349 return;
7350 }
7351 } else {
7352 if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
7353 ieee80211_node_set_txrate_dot11rate(ni,
7354 rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
7355 IWX_RATE_HT_MCS_NSS_MSK_V1));
7356
7357 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7358 "%s:%d new MCS idx: %d rate_n_flags: %x\n",
7359 __func__, __LINE__,
7360 ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
7361 return;
7362 }
7363 }
7364
7365 if (rate_n_flags_ver2) {
7366 const struct ieee80211_rateset *rs;
7367 uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
7368 if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
7369 rs = &ieee80211_std_rateset_11a;
7370 else
7371 rs = &ieee80211_std_rateset_11b;
7372 if (ridx < rs->rs_nrates)
7373 rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
7374 else
7375 rval = 0;
7376 } else {
7377 plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
7378
7379 rval = 0;
7380 for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
7381 if (iwx_rates[i].plcp == plcp) {
7382 rval = iwx_rates[i].rate;
7383 break;
7384 }
7385 }
7386 }
7387
7388 if (rval) {
7389 uint8_t rv;
7390 for (i = 0; i < rs->rs_nrates; i++) {
7391 rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7392 if (rv == rval) {
7393 ieee80211_node_set_txrate_dot11rate(ni, i);
7394 break;
7395 }
7396 }
7397 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7398 "%s:%d new rate %d\n", __func__, __LINE__,
7399 ieee80211_node_get_txrate_dot11rate(ni));
7400 }
7401 }
7402
7403 static int
7404 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7405 uint8_t chains_static, uint8_t chains_dynamic)
7406 {
7407 struct iwx_rlc_config_cmd cmd;
7408 uint32_t cmd_id;
7409 uint8_t active_cnt, idle_cnt;
7410
7411 memset(&cmd, 0, sizeof(cmd));
7412
7413 idle_cnt = chains_static;
7414 active_cnt = chains_dynamic;
7415
7416 cmd.phy_id = htole32(phyctxt->id);
7417 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7418 IWX_PHY_RX_CHAIN_VALID_POS);
7419 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7420 cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7421 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7422
7423 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7424 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7425 }
7426
/*
 * Reconfigure an existing PHY context for a (possibly different) channel,
 * chain configuration and channel width, then push RLC settings if the
 * firmware uses the separate RLC_CONFIG_CMD (version 2).
 *
 * Firmware with the BINDING_CDB_SUPPORT capability cannot MODIFY a PHY
 * context across the 2GHz/5GHz band boundary; in that case the context
 * is removed and re-added on the new channel instead.
 *
 * Returns 0 on success or an errno-style error from the firmware command.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	/* Guard against net80211 handing us the "any channel" sentinel. */
	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		/* Band change: remove and re-add instead of modifying. */
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Record the new secondary-channel offset and VHT width. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/* Newer firmware wants chain config via a separate RLC command. */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7491
/*
 * Prepare the firmware for authentication: set up a PHY context on the
 * BSS channel, then add the MAC context, binding, and station, enable the
 * management Tx queue, clear statistics, and finally start a session
 * protection time event so the firmware stays on-channel during the
 * association handshake.
 *
 * On failure, previously established firmware state is torn down in
 * reverse order via the goto chain below — but only if no firmware reset
 * happened in the meantime (sc_generation check).
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * NOTE(review): this node reference does not appear to be released
	 * on the error returns below — confirm where the matching
	 * ieee80211_free_node() happens.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
 	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Monitor mode uses a dedicated injection queue and is done. */
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7596
/*
 * Undo the firmware state established by iwx_auth(): cancel session
 * protection, then remove the station, binding and MAC context, in that
 * order (the reverse of how they were added).  Finally the now-unused
 * PHY context is parked on a default channel.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7648
/*
 * Transition to RUN state after a successful association: widen the PHY
 * context for HT/VHT if negotiated, update the station and MAC contexts
 * with the new association state, configure smart-FIFO and multicast,
 * set power management, and finally start firmware rate scaling.
 *
 * The firmware commands below must be issued in this order; each step
 * returns on error without tearing down earlier steps (recovery is
 * handled by the caller's state machine).
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Use two chains when MIMO is available. */
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode does not transmit; no rate scaling needed. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7745
/*
 * Leave RUN state: flush the station's Tx path, tear down Rx BA
 * sessions, restore smart-FIFO and beacon filter defaults, and mark the
 * station as disassociated in the firmware MAC context.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7801
7802 static struct ieee80211_node *
7803 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7804 {
7805 return malloc(sizeof (struct iwx_node), M_80211_NODE,
7806 M_NOWAIT | M_ZERO);
7807 }
7808
7809 #if 0
/*
 * net80211 set-key hook (currently compiled out): queue a CCMP key for
 * asynchronous installation into firmware via the setkey task.  Other
 * ciphers fall back to software crypto.  Returning EBUSY tells
 * net80211 the key will be installed later.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	/* Reject the request if the ring of pending key installs is full. */
	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	return EBUSY;
}
7839
/*
 * Install a CCMP key for the given station into firmware (currently
 * compiled out along with the rest of the setkey path).  On failure the
 * association is torn down; once both pairwise and group keys are in
 * place the port is marked valid for data traffic.
 */
int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys go into firmware key slot 1, pairwise keys slot 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		/* Key install failed: deauth and fall back to scanning. */
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* Open the port once both pairwise and group keys are installed. */
	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}
7904
/*
 * Deferred task that drains the ring of pending key installs in FIFO
 * order (currently compiled out).  Stops early on the first error or
 * when the driver is shutting down.
 */
void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		/* Clear the consumed slot so stale pointers cannot linger. */
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
7928
/*
 * net80211 delete-key hook (currently compiled out): invalidate a CCMP
 * key in firmware.  Non-CCMP keys are handled by software crypto.  The
 * command is sent asynchronously and its return value is ignored since
 * the key is being removed anyway.
 */
void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	/* Nothing to do if the station is already gone from firmware. */
	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
7960 #endif
7961
7962 static int
7963 iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
7964 {
7965 struct ieee80211com *ic = vap->iv_ic;
7966 struct iwx_softc *sc = ic->ic_softc;
7967 enum ieee80211_state ostate = vap->iv_state;
7968 int err = 0;
7969
7970 IWX_LOCK(sc);
7971
7972 if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
7973 switch (ostate) {
7974 case IEEE80211_S_RUN:
7975 err = iwx_run_stop(sc);
7976 if (err)
7977 goto out;
7978 /* FALLTHROUGH */
7979 case IEEE80211_S_ASSOC:
7980 case IEEE80211_S_AUTH:
7981 if (nstate <= IEEE80211_S_AUTH) {
7982 err = iwx_deauth(sc);
7983 if (err)
7984 goto out;
7985 }
7986 /* FALLTHROUGH */
7987 case IEEE80211_S_SCAN:
7988 case IEEE80211_S_INIT:
7989 default:
7990 break;
7991 }
7992 //
7993 // /* Die now if iwx_stop() was called while we were sleeping. */
7994 // if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7995 // refcnt_rele_wake(&sc->task_refs);
7996 // splx(s);
7997 // return;
7998 // }
7999 }
8000
8001 switch (nstate) {
8002 case IEEE80211_S_INIT:
8003 break;
8004
8005 case IEEE80211_S_SCAN:
8006 break;
8007
8008 case IEEE80211_S_AUTH:
8009 err = iwx_auth(vap, sc);
8010 break;
8011
8012 case IEEE80211_S_ASSOC:
8013 break;
8014
8015 case IEEE80211_S_RUN:
8016 err = iwx_run(vap, sc);
8017 break;
8018 default:
8019 break;
8020 }
8021
8022 out:
8023 IWX_UNLOCK(sc);
8024
8025 return (err);
8026 }
8027
8028 static int
8029 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
8030 {
8031 struct iwx_vap *ivp = IWX_VAP(vap);
8032 struct ieee80211com *ic = vap->iv_ic;
8033 enum ieee80211_state ostate = vap->iv_state;
8034 int err;
8035
8036 /*
8037 * Prevent attempts to transition towards the same state, unless
8038 * we are scanning in which case a SCAN -> SCAN transition
8039 * triggers another scan iteration. And AUTH -> AUTH is needed
8040 * to support band-steering.
8041 */
8042 if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
8043 nstate != IEEE80211_S_AUTH)
8044 return 0;
8045 IEEE80211_UNLOCK(ic);
8046 err = iwx_newstate_sub(vap, nstate);
8047 IEEE80211_LOCK(ic);
8048 if (err == 0)
8049 err = ivp->iv_newstate(vap, nstate, arg);
8050
8051 return (err);
8052 }
8053
8054 static void
8055 iwx_endscan(struct iwx_softc *sc)
8056 {
8057 struct ieee80211com *ic = &sc->sc_ic;
8058 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8059
8060 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8061 return;
8062
8063 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8064
8065 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8066 wakeup(&vap->iv_state); /* wake up iwx_newstate */
8067 }
8068
8069 /*
8070 * Aging and idle timeouts for the different possible scenarios
8071 * in default configuration
8072 */
8073 static const uint32_t
8074 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8075 {
8076 htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8077 htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8078 },
8079 {
8080 htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
8081 htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8082 },
8083 {
8084 htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
8085 htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
8086 },
8087 {
8088 htole32(IWX_SF_BA_AGING_TIMER_DEF),
8089 htole32(IWX_SF_BA_IDLE_TIMER_DEF)
8090 },
8091 {
8092 htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
8093 htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
8094 },
8095 };
8096
8097 /*
8098 * Aging and idle timeouts for the different possible scenarios
8099 * in single BSS MAC configuration.
8100 */
8101 static const uint32_t
8102 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8103 {
8104 htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
8105 htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
8106 },
8107 {
8108 htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
8109 htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
8110 },
8111 {
8112 htole32(IWX_SF_MCAST_AGING_TIMER),
8113 htole32(IWX_SF_MCAST_IDLE_TIMER)
8114 },
8115 {
8116 htole32(IWX_SF_BA_AGING_TIMER),
8117 htole32(IWX_SF_BA_IDLE_TIMER)
8118 },
8119 {
8120 htole32(IWX_SF_TX_RE_AGING_TIMER),
8121 htole32(IWX_SF_TX_RE_IDLE_TIMER)
8122 },
8123 };
8124
8125 static void
8126 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8127 struct ieee80211_node *ni)
8128 {
8129 int i, j, watermark;
8130
8131 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8132
8133 /*
8134 * If we are in association flow - check antenna configuration
8135 * capabilities of the AP station, and choose the watermark accordingly.
8136 */
8137 if (ni) {
8138 if (ni->ni_flags & IEEE80211_NODE_HT) {
8139 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8140 int hasmimo = 0;
8141 for (i = 0; i < htrs->rs_nrates; i++) {
8142 if (htrs->rs_rates[i] > 7) {
8143 hasmimo = 1;
8144 break;
8145 }
8146 }
8147 if (hasmimo)
8148 watermark = IWX_SF_W_MARK_MIMO2;
8149 else
8150 watermark = IWX_SF_W_MARK_SISO;
8151 } else {
8152 watermark = IWX_SF_W_MARK_LEGACY;
8153 }
8154 /* default watermark value for unassociated mode. */
8155 } else {
8156 watermark = IWX_SF_W_MARK_MIMO2;
8157 }
8158 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8159
8160 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8161 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8162 sf_cmd->long_delay_timeouts[i][j] =
8163 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8164 }
8165 }
8166
8167 if (ni) {
8168 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8169 sizeof(iwx_sf_full_timeout));
8170 } else {
8171 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8172 sizeof(iwx_sf_full_timeout_def));
8173 }
8174
8175 }
8176
8177 static int
8178 iwx_sf_config(struct iwx_softc *sc, int new_state)
8179 {
8180 struct ieee80211com *ic = &sc->sc_ic;
8181 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8182 struct ieee80211_node *ni = vap->iv_bss;
8183 struct iwx_sf_cfg_cmd sf_cmd = {
8184 .state = htole32(new_state),
8185 };
8186 int err = 0;
8187
8188 switch (new_state) {
8189 case IWX_SF_UNINIT:
8190 case IWX_SF_INIT_OFF:
8191 iwx_fill_sf_command(sc, &sf_cmd, NULL);
8192 break;
8193 case IWX_SF_FULL_ON:
8194 iwx_fill_sf_command(sc, &sf_cmd, ni);
8195 break;
8196 default:
8197 return EINVAL;
8198 }
8199
8200 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8201 sizeof(sf_cmd), &sf_cmd);
8202 return err;
8203 }
8204
8205 static int
8206 iwx_send_bt_init_conf(struct iwx_softc *sc)
8207 {
8208 struct iwx_bt_coex_cmd bt_cmd;
8209
8210 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8211
8212 bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8213 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8214 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8215
8216
8217 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8218 &bt_cmd);
8219 }
8220
/*
 * Send the SoC configuration command describing the platform's crystal
 * latency and LTR delay properties to firmware.  Returns 0 on success
 * or the errno from the command submission.
 */
static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask. Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/*
		 * The low-latency flag is only set when the UMAC scan
		 * command is at least version 2.
		 */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
8259
/*
 * Send an MCC (mobile country code) update to firmware and validate
 * the regulatory response it returns.  The response contents are
 * currently only used for debug output.
 *
 * 'alpha2' is a two-letter country code, e.g. "ZZ" for the world
 * regulatory domain.  Returns 0 on success, EIO on a malformed
 * response, or an errno from command submission.
 */
static int
iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
{
	struct iwx_mcc_update_cmd mcc_cmd;
	struct iwx_host_cmd hcmd = {
		.id = IWX_MCC_UPDATE_CMD,
		.flags = IWX_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	struct iwx_rx_packet *pkt;
	struct iwx_mcc_update_resp *resp;
	size_t resp_len;
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;

	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Sanity-check the response length before dereferencing it. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len < sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	/* The response carries a variable-length channel array. */
	resp = (void *)pkt->data;
	if (resp_len != sizeof(*resp) +
	    resp->n_channels * sizeof(resp->channels[0])) {
		err = EIO;
		goto out;
	}

	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));

out:
	iwx_free_resp(sc, &hcmd);

	return err;
}
8316
8317 static int
8318 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8319 {
8320 struct iwx_temp_report_ths_cmd cmd;
8321 int err;
8322
8323 /*
8324 * In order to give responsibility for critical-temperature-kill
8325 * and TX backoff to FW we need to send an empty temperature
8326 * reporting command at init time.
8327 */
8328 memset(&cmd, 0, sizeof(cmd));
8329
8330 err = iwx_send_cmd_pdu(sc,
8331 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8332 0, sizeof(cmd), &cmd);
8333 if (err)
8334 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8335 DEVNAME(sc), err);
8336
8337 return err;
8338 }
8339
8340 static int
8341 iwx_init_hw(struct iwx_softc *sc)
8342 {
8343 struct ieee80211com *ic = &sc->sc_ic;
8344 int err = 0, i;
8345
8346 err = iwx_run_init_mvm_ucode(sc, 0);
8347 if (err)
8348 return err;
8349
8350 if (!iwx_nic_lock(sc))
8351 return EBUSY;
8352
8353 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8354 if (err) {
8355 printf("%s: could not init tx ant config (error %d)\n",
8356 DEVNAME(sc), err);
8357 goto err;
8358 }
8359
8360 if (sc->sc_tx_with_siso_diversity) {
8361 err = iwx_send_phy_cfg_cmd(sc);
8362 if (err) {
8363 printf("%s: could not send phy config (error %d)\n",
8364 DEVNAME(sc), err);
8365 goto err;
8366 }
8367 }
8368
8369 err = iwx_send_bt_init_conf(sc);
8370 if (err) {
8371 printf("%s: could not init bt coex (error %d)\n",
8372 DEVNAME(sc), err);
8373 return err;
8374 }
8375
8376 err = iwx_send_soc_conf(sc);
8377 if (err) {
8378 printf("%s: iwx_send_soc_conf failed\n", __func__);
8379 return err;
8380 }
8381
8382 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8383 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8384 err = iwx_send_dqa_cmd(sc);
8385 if (err) {
8386 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8387 "failed (error %d)\n", __func__, err);
8388 return err;
8389 }
8390 }
8391 // TODO phyctxt
8392 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8393 /*
8394 * The channel used here isn't relevant as it's
8395 * going to be overwritten in the other flows.
8396 * For now use the first channel we have.
8397 */
8398 sc->sc_phyctxt[i].id = i;
8399 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8400 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8401 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8402 if (err) {
8403 printf("%s: could not add phy context %d (error %d)\n",
8404 DEVNAME(sc), i, err);
8405 goto err;
8406 }
8407 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8408 IWX_RLC_CONFIG_CMD) == 2) {
8409 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8410 if (err) {
8411 printf("%s: could not configure RLC for PHY "
8412 "%d (error %d)\n", DEVNAME(sc), i, err);
8413 goto err;
8414 }
8415 }
8416 }
8417
8418 err = iwx_config_ltr(sc);
8419 if (err) {
8420 printf("%s: PCIe LTR configuration failed (error %d)\n",
8421 DEVNAME(sc), err);
8422 }
8423
8424 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8425 err = iwx_send_temp_report_ths_cmd(sc);
8426 if (err) {
8427 printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8428 __func__);
8429 goto err;
8430 }
8431 }
8432
8433 err = iwx_power_update_device(sc);
8434 if (err) {
8435 printf("%s: could not send power command (error %d)\n",
8436 DEVNAME(sc), err);
8437 goto err;
8438 }
8439
8440 if (sc->sc_nvm.lar_enabled) {
8441 err = iwx_send_update_mcc_cmd(sc, "ZZ");
8442 if (err) {
8443 printf("%s: could not init LAR (error %d)\n",
8444 DEVNAME(sc), err);
8445 goto err;
8446 }
8447 }
8448
8449 err = iwx_config_umac_scan_reduced(sc);
8450 if (err) {
8451 printf("%s: could not configure scan (error %d)\n",
8452 DEVNAME(sc), err);
8453 goto err;
8454 }
8455
8456 err = iwx_disable_beacon_filter(sc);
8457 if (err) {
8458 printf("%s: could not disable beacon filter (error %d)\n",
8459 DEVNAME(sc), err);
8460 goto err;
8461 }
8462
8463 err:
8464 iwx_nic_unlock(sc);
8465 return err;
8466 }
8467
8468 /* Allow multicast from our BSSID. */
8469 static int
8470 iwx_allow_mcast(struct iwx_softc *sc)
8471 {
8472 struct ieee80211com *ic = &sc->sc_ic;
8473 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8474 struct iwx_node *in = IWX_NODE(vap->iv_bss);
8475 struct iwx_mcast_filter_cmd *cmd;
8476 size_t size;
8477 int err;
8478
8479 size = roundup(sizeof(*cmd), 4);
8480 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8481 if (cmd == NULL)
8482 return ENOMEM;
8483 cmd->filter_own = 1;
8484 cmd->port_id = 0;
8485 cmd->count = 0;
8486 cmd->pass_all = 1;
8487 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8488
8489 err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8490 0, size, cmd);
8491 free(cmd, M_DEVBUF);
8492 return err;
8493 }
8494
8495 static int
8496 iwx_init(struct iwx_softc *sc)
8497 {
8498 int err, generation;
8499 generation = ++sc->sc_generation;
8500 iwx_preinit(sc);
8501
8502 err = iwx_start_hw(sc);
8503 if (err) {
8504 printf("%s: iwx_start_hw failed\n", __func__);
8505 return err;
8506 }
8507
8508 err = iwx_init_hw(sc);
8509 if (err) {
8510 if (generation == sc->sc_generation)
8511 iwx_stop_device(sc);
8512 printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
8513 return err;
8514 }
8515
8516 sc->sc_flags |= IWX_FLAG_HW_INITED;
8517 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8518
8519 return 0;
8520 }
8521
/*
 * Drain the driver's send queue into the hardware Tx rings.  Stops as
 * soon as any Tx queue reports full (sc->qfullmsk != 0) or the queue
 * is empty.  Frames that fail to transmit are dropped and counted as
 * output errors.
 */
static void
iwx_start(struct iwx_softc *sc)
{
	struct ieee80211_node *ni;
	struct mbuf *m;

	while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/* The node reference rides in the mbuf's rcvif field. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwx_tx(sc, m, ni) != 0) {
			if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
			continue;
		}
	}
}
8536
8537 static void
8538 iwx_stop(struct iwx_softc *sc)
8539 {
8540 struct ieee80211com *ic = &sc->sc_ic;
8541 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8542 struct iwx_vap *ivp = IWX_VAP(vap);
8543
8544 iwx_stop_device(sc);
8545
8546 /* Reset soft state. */
8547 sc->sc_generation++;
8548 ivp->phy_ctxt = NULL;
8549
8550 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8551 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8552 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8553 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8554 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8555 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8556 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8557 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8558
8559 sc->sc_rx_ba_sessions = 0;
8560 sc->ba_rx.start_tidmask = 0;
8561 sc->ba_rx.stop_tidmask = 0;
8562 memset(sc->aggqid, 0, sizeof(sc->aggqid));
8563 sc->ba_tx.start_tidmask = 0;
8564 sc->ba_tx.stop_tidmask = 0;
8565 }
8566
8567 static void
8568 iwx_watchdog(void *arg)
8569 {
8570 struct iwx_softc *sc = arg;
8571 struct ieee80211com *ic = &sc->sc_ic;
8572 int i;
8573
8574 /*
8575 * We maintain a separate timer for each Tx queue because
8576 * Tx aggregation queues can get "stuck" while other queues
8577 * keep working. The Linux driver uses a similar workaround.
8578 */
8579 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8580 if (sc->sc_tx_timer[i] > 0) {
8581 if (--sc->sc_tx_timer[i] == 0) {
8582 printf("%s: device timeout\n", DEVNAME(sc));
8583
8584 iwx_nic_error(sc);
8585 iwx_dump_driver_status(sc);
8586 ieee80211_restart_all(ic);
8587 return;
8588 }
8589 }
8590 }
8591 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8592 }
8593
8594 /*
8595 * Note: This structure is read from the device with IO accesses,
8596 * and the reading already does the endian conversion. As it is
8597 * read with uint32_t-sized accesses, any members with a different size
8598 * need to be ordered correctly though!
8599 */
8600 struct iwx_error_event_table {
8601 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8602 uint32_t error_id; /* type of error */
8603 uint32_t trm_hw_status0; /* TRM HW status */
8604 uint32_t trm_hw_status1; /* TRM HW status */
8605 uint32_t blink2; /* branch link */
8606 uint32_t ilink1; /* interrupt link */
8607 uint32_t ilink2; /* interrupt link */
8608 uint32_t data1; /* error-specific data */
8609 uint32_t data2; /* error-specific data */
8610 uint32_t data3; /* error-specific data */
8611 uint32_t bcon_time; /* beacon timer */
8612 uint32_t tsf_low; /* network timestamp function timer */
8613 uint32_t tsf_hi; /* network timestamp function timer */
8614 uint32_t gp1; /* GP1 timer register */
8615 uint32_t gp2; /* GP2 timer register */
8616 uint32_t fw_rev_type; /* firmware revision type */
8617 uint32_t major; /* uCode version major */
8618 uint32_t minor; /* uCode version minor */
8619 uint32_t hw_ver; /* HW Silicon version */
8620 uint32_t brd_ver; /* HW board version */
8621 uint32_t log_pc; /* log program counter */
8622 uint32_t frame_ptr; /* frame pointer */
8623 uint32_t stack_ptr; /* stack pointer */
8624 uint32_t hcmd; /* last host command header */
8625 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
8626 * rxtx_flag */
8627 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
8628 * host_flag */
8629 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
8630 * enc_flag */
8631 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
8632 * time_flag */
8633 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
8634 * wico interrupt */
8635 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
8636 uint32_t wait_event; /* wait event() caller address */
8637 uint32_t l2p_control; /* L2pControlField */
8638 uint32_t l2p_duration; /* L2pDurationField */
8639 uint32_t l2p_mhvalid; /* L2pMhValidBits */
8640 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
8641 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
8642 * (LMPM_PMG_SEL) */
8643 uint32_t u_timestamp; /* indicate when the date and time of the
8644 * compilation */
8645 uint32_t flow_handler; /* FH read/write pointers, RX credit */
8646 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8647
8648 /*
8649 * UMAC error struct - relevant starting from family 8000 chip.
8650 * Note: This structure is read from the device with IO accesses,
8651 * and the reading already does the endian conversion. As it is
8652 * read with u32-sized accesses, any members with a different size
8653 * need to be ordered correctly though!
8654 */
8655 struct iwx_umac_error_event_table {
8656 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8657 uint32_t error_id; /* type of error */
8658 uint32_t blink1; /* branch link */
8659 uint32_t blink2; /* branch link */
8660 uint32_t ilink1; /* interrupt link */
8661 uint32_t ilink2; /* interrupt link */
8662 uint32_t data1; /* error-specific data */
8663 uint32_t data2; /* error-specific data */
8664 uint32_t data3; /* error-specific data */
8665 uint32_t umac_major;
8666 uint32_t umac_minor;
8667 uint32_t frame_pointer; /* core register 27*/
8668 uint32_t stack_pointer; /* core register 28 */
8669 uint32_t cmd_header; /* latest host cmd sent to UMAC */
8670 uint32_t nic_isr_pref; /* ISR status register */
8671 } __packed;
8672
8673 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
8674 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
8675
/*
 * Dump the UMAC error event table from device memory to the console.
 * Layout must match struct iwx_umac_error_event_table above.
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Pointers below 0x400000 are treated as invalid. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() counts in 32-bit words, not bytes. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8720
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Firmware assertion codes mapped to human-readable names.  The final
 * "ADVANCED_SYSASSERT" entry is the catch-all returned by
 * iwx_desc_lookup() when no code matches.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8747
8748 static const char *
8749 iwx_desc_lookup(uint32_t num)
8750 {
8751 int i;
8752
8753 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8754 if (advanced_lookup[i].num ==
8755 (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8756 return advanced_lookup[i].name;
8757
8758 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8759 return advanced_lookup[i].name;
8760 }
8761
8762 /*
8763 * Support for dumping the error log seemed like a good idea ...
8764 * but it's mostly hex junk and the only sensible thing is the
8765 * hw/ucode revision (which we know anyway). Since it's here,
8766 * I'll just leave it in, just in case e.g. the Intel guys want to
8767 * help us decipher some "ADVANCED_SYSASSERT" later.
8768 */
8769 static void
8770 iwx_nic_error(struct iwx_softc *sc)
8771 {
8772 struct iwx_error_event_table table;
8773 uint32_t base;
8774
8775 printf("%s: dumping device error log\n", DEVNAME(sc));
8776 printf("%s: GOS-3758: 1\n", __func__);
8777 base = sc->sc_uc.uc_lmac_error_event_table[0];
8778 printf("%s: GOS-3758: 2\n", __func__);
8779 if (base < 0x400000) {
8780 printf("%s: Invalid error log pointer 0x%08x\n",
8781 DEVNAME(sc), base);
8782 return;
8783 }
8784
8785 printf("%s: GOS-3758: 3\n", __func__);
8786 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8787 printf("%s: reading errlog failed\n", DEVNAME(sc));
8788 return;
8789 }
8790
8791 printf("%s: GOS-3758: 4\n", __func__);
8792 if (!table.valid) {
8793 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8794 return;
8795 }
8796
8797 printf("%s: GOS-3758: 5\n", __func__);
8798 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8799 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8800 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8801 sc->sc_flags, table.valid);
8802 }
8803
8804 printf("%s: GOS-3758: 6\n", __func__);
8805 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8806 iwx_desc_lookup(table.error_id));
8807 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8808 table.trm_hw_status0);
8809 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8810 table.trm_hw_status1);
8811 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8812 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8813 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8814 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8815 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8816 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8817 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8818 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8819 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8820 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8821 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8822 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8823 table.fw_rev_type);
8824 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8825 table.major);
8826 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8827 table.minor);
8828 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8829 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8830 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8831 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8832 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8833 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8834 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8835 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8836 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8837 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8838 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8839 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8840 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8841 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8842 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8843 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8844 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8845
8846 if (sc->sc_uc.uc_umac_error_event_table)
8847 iwx_nic_umac_error(sc);
8848 }
8849
8850 static void
8851 iwx_dump_driver_status(struct iwx_softc *sc)
8852 {
8853 struct ieee80211com *ic = &sc->sc_ic;
8854 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8855 enum ieee80211_state state = vap->iv_state;
8856 int i;
8857
8858 printf("driver status:\n");
8859 for (i = 0; i < nitems(sc->txq); i++) {
8860 struct iwx_tx_ring *ring = &sc->txq[i];
8861 printf(" tx ring %2d: qid=%-2d cur=%-3d "
8862 "cur_hw=%-3d queued=%-3d\n",
8863 i, ring->qid, ring->cur, ring->cur_hw,
8864 ring->queued);
8865 }
8866 printf(" rx ring: cur=%d\n", sc->rxq.cur);
8867 printf(" 802.11 state %s\n", ieee80211_state_name[state]);
8868 }
8869
/*
 * Sync the RX buffer's DMA map for CPU reads and point _var_ at the
 * response payload, which starts immediately after the packet header
 * (_pkt_ + 1).  NOTE: this macro relies on 'sc' and 'data' being in
 * scope at the expansion site (see iwx_rx_pkt()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_) \
do { \
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1); \
} while (/*CONSTCOND*/0)
8875
8876 static int
8877 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8878 {
8879 int qid, idx, code;
8880
8881 qid = pkt->hdr.qid & ~0x80;
8882 idx = pkt->hdr.idx;
8883 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8884
8885 return (!(qid == 0 && idx == 0 && code == 0) &&
8886 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8887 }
8888
/*
 * Process all firmware packets contained in one RX buffer.
 *
 * A buffer of IWX_RBUF_SIZE bytes may carry several packets, each
 * starting on an IWX_FH_RSCSR_FRAME_ALIGN boundary; AX210 and later
 * devices place only a single packet per buffer.  Received frames
 * (IWX_REPLY_RX_MPDU_CMD) are handed up to net80211 via
 * iwx_rx_mpdu_mq(), which takes ownership of the mbuf; command
 * responses are copied into sc->sc_cmd_resp_pkt[] for the waiting
 * thread and completed through iwx_cmd_done().
 *
 * Ownership of data->m is delicate: on the first MPDU in the buffer,
 * the mbuf is taken off the RX ring (iwx_rx_addbuf() installs a fresh
 * one) so that m0 can be passed to the stack.  If m0 is consumed or
 * freed, it is set to NULL to terminate the loop, and the final
 * cleanup frees m0 only if it is no longer the ring's buffer.
 *
 * NOTE(review): the 'ml' parameter is not used in this function body;
 * callers pass scratch storage (see iwx_notif_intr()).
 */
static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	m0 = data->m;
	/* Walk packets until the buffer is exhausted or a packet is bad. */
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;
		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		if (!iwx_rx_pkt_valid(pkt))
			break;

		/*
		 * XXX Intel inside (tm)
		 * Any commands in the LONG_GROUP could actually be in the
		 * LEGACY group. Firmware API versions >= 50 reject commands
		 * in group 0, forcing us to use this hack.
		 */
		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
			struct iwx_tx_ring *ring = &sc->txq[qid];
			struct iwx_tx_data *txdata = &ring->data[idx];
			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
				code = iwx_cmd_opcode(code);
		}

		/* Total packet size; bail on truncated/oversized packets. */
		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
			break;

		/*
		 * On the first MPDU in this buffer, replace the buffer on
		 * the RX ring so the network stack can take ownership of
		 * m0 below.  TODO(review): confirm intended behavior when
		 * iwx_rx_addbuf() fails mid-buffer — remaining packets are
		 * silently dropped here.
		 */
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				break;
			}
			KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			/* XXX-THJ: I've not managed to hit this path in testing */
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			/* AX210 devices ship only one packet per Rx buffer. */
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
			    nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
				if (m == NULL) {
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
			}
			break;
		}

		// case IWX_BAR_FRAME_RELEASE:
		// 	iwx_rx_bar_frame_release(sc, pkt, ml);
		// 	break;
		//
		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_BA_NOTIF:
			iwx_rx_compressed_ba(sc, pkt);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			iwx_rx_bmiss(sc, pkt, data);
			DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
			    __func__));
			ieee80211_beacon_miss(ic);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;
			struct iwx_alive_resp_v5 *resp5;
			struct iwx_alive_resp_v6 *resp6;

			DPRINTF(("%s: firmware alive\n", __func__));
			sc->sc_uc.uc_ok = 0;

			/*
			 * For v5 and above, we can check the version, for older
			 * versions we need to check the size.
			 */
			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 6) {
				SYNC_RESP_STRUCT(resp6, pkt);
				/* Size mismatch: wake the waiter without uc_ok. */
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp6)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp6->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp6->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp6->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp6->sku_id.data[2]);
				if (resp6->status == IWX_ALIVE_STATUS_OK) {
					sc->sc_uc.uc_ok = 1;
				}
			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 5) {
				SYNC_RESP_STRUCT(resp5, pkt);
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp5)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp5->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp5->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp5->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp5->sku_id.data[2]);
				if (resp5->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				/* Pre-v5 alive response, matched by size only. */
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else
				printf("unknown payload version");

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			ieee80211_restart_all(ic);
			break;
		}

		/*
		 * Synchronous command responses: copy the packet into the
		 * response buffer allocated by the issuing thread, if any.
		 */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_CMD):
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			/* No thread is waiting on this response. */
			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Failed or ill-sized response: discard the buffer. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
			struct iwx_umac_scan_complete *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
			    notif->status));
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
			    __func__));
			struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
			    notif->status));
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Ignore notifications for stale time events. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_NOTIF): {
			struct iwx_session_prot_notif *notif;
			uint32_t status, start, conf_id;

			SYNC_RESP_STRUCT(notif, pkt);

			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			(void)notif;
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			/*
			 * NOTE(review): len_n_flags is compared against
			 * host-order constants here, unlike the htole32()
			 * use in iwx_rx_pkt_valid() — verify on big-endian.
			 */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	/* Free m0 unless it was consumed above or still owned by the ring. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
9326
9327 static void
9328 iwx_notif_intr(struct iwx_softc *sc)
9329 {
9330 struct mbuf m;
9331 uint16_t hw;
9332
9333 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
9334 BUS_DMASYNC_POSTREAD);
9335
9336 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
9337 uint16_t *status = sc->rxq.stat_dma.vaddr;
9338 hw = le16toh(*status) & 0xfff;
9339 } else
9340 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
9341 hw &= (IWX_RX_MQ_RING_COUNT - 1);
9342 while (sc->rxq.cur != hw) {
9343 struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
9344
9345 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
9346 BUS_DMASYNC_POSTREAD);
9347
9348 iwx_rx_pkt(sc, data, &m);
9349 sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
9350 }
9351
9352 /*
9353 * Tell the firmware what we have processed.
9354 * Seems like the hardware gets upset unless we align the write by 8??
9355 */
9356 hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
9357 IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
9358 }
9359
#if 0
/*
 * Legacy line-based (INTx/ICT) interrupt handler, currently compiled
 * out — apparently superseded by the MSI-X handler below.  Kept for
 * reference in case non-MSIX operation is ever needed again.
 */
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	/* Mask all interrupts while we figure out what fired. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something. keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		/* Hardware gone (e.g. device removed)? */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

out_ena:
	iwx_restore_interrupts(sc);
out:
	return rv;
}
#endif
9488
/*
 * MSI-X interrupt handler (single vector, index 0).
 *
 * Reads and acknowledges the FH (DMA) and HW interrupt-cause registers,
 * dispatches RX notifications, firmware-chunk-load completions, rfkill
 * and fatal-error conditions, and finally re-enables the vector via the
 * write-1-clear automask register.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	/* Read the causes, then ack them by writing the same bits back. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	/* Only consider causes we have enabled. */
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	/* Fatal firmware/DMA error: dump state and restart the stack. */
	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		/* NOTE(review): returns without clearing the automask bit. */
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
out:
	IWX_UNLOCK(sc);
	return;
}
9570
9571 /*
9572 * The device info table below contains device-specific config overrides.
9573 * The most important parameter derived from this table is the name of the
9574 * firmware image to load.
9575 *
9576 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9577 * The "old" table matches devices based on PCI vendor/product IDs only.
9578 * The "new" table extends this with various device parameters derived
9579 * from MAC type, and RF type.
9580 *
9581 * In iwlwifi "old" and "new" tables share the same array, where "old"
9582 * entries contain dummy values for data defined only for "new" entries.
9583 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9586 * entries from "old" to "new" have at times been reverted due to regressions.
9587 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9588 * devices in the same driver.
9589 *
9590 * Our table below contains mostly "new" entries declared in iwlwifi
9591 * with the _IWL_DEV_INFO() macro (with a leading underscore).
9592 * Other devices are matched based on PCI vendor/product ID as usual,
9593 * unless matching specific PCI subsystem vendor/product IDs is required.
9594 *
9595 * Some "old"-style entries are required to identify the firmware image to use.
9596 * Others might be used to print a specific marketing name into Linux dmesg,
9597 * but we can't be sure whether the corresponding devices would be matched
9598 * correctly in the absence of their entries. So we include them just in case.
9599 */
9600
/*
 * One device-matching entry; fields set to IWX_CFG_ANY act as
 * wildcards.  The matched entry's cfg determines, among other things,
 * which firmware image is loaded (see the table comment above).
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID */
	uint16_t subdevice;	/* PCI subsystem device ID */
	uint16_t mac_type;	/* MAC type (e.g. IWX_CFG_MAC_TYPE_QU) */
	uint16_t rf_type;	/* RF type (e.g. IWX_CFG_RF_TYPE_JF1) */
	uint8_t mac_step;	/* silicon step (e.g. IWX_SILICON_C_STEP) */
	uint8_t rf_id;		/* RF ID (e.g. IWX_CFG_RF_ID_JF1) */
	uint8_t no_160;		/* IWX_CFG_NO_160/IWX_CFG_160; presumably
				   160 MHz channel support — see table below */
	uint8_t cores;		/* e.g. IWX_CFG_CORES_BT */
	uint8_t cdb;		/* e.g. IWX_CFG_NO_CDB */
	uint8_t jacket;		/* jacket variant; IWX_CFG_ANY in our table */
	const struct iwx_device_cfg *cfg;	/* device config override */
};
9614
/*
 * Fully-specified ("new"-style, as in Linux iwlwifi) table entry:
 * every matching parameter is given explicitly.
 */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/*
 * "Old"-style entry: match on PCI device/subdevice IDs only; all other
 * parameters are wildcards (IWX_CFG_ANY).
 */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9626
9627 /*
9628 * When adding entries to this table keep in mind that entries must
9629 * be listed in the same order as in the Linux driver. Code walks this
9630 * table backwards and uses the first matching entry it finds.
9631 * Device firmware must be available in fw_update(8).
9632 */
9633 static const struct iwx_dev_info iwx_dev_info_table[] = {
9634 /* So with HR */
9635 IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
9636 IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
9637 IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
9638 IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
9639 IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
9640 IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
9641 IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
9642 IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
9643 IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
9644 IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
9645 IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
9646 IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
9647 IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
9648 IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
9649 IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9650 IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9651 IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
9652 IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
9653 IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9654 IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9655 IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
9656 IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
9657 IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
9658 IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
9659 IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
9660 IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
9661 IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
9662 IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
9663 IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
9664 IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9665 IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9666 IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
9667 IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
9668 IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
9669 IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9670 IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9671
9672 /* So with GF2 */
9673 IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9674 IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9675 IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9676 IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9677 IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9678 IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9679 IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9680 IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9681 IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9682 IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9683 IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9684 IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9685
9686 /* Qu with Jf, C step */
9687 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9688 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9689 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9690 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9691 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
9692 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9693 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9694 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9695 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9696 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
9697 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9698 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9699 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9700 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9701 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
9702 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9703 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9704 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9705 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9706 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
9707 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9708 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9709 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9710 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9711 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
9712 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9713 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9714 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9715 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9716 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
9717 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9718 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9719 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9720 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9721 IWX_CFG_ANY,
9722 iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
9723 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9724 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9725 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9726 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9727 IWX_CFG_ANY,
9728 iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
9729
9730 /* QuZ with Jf */
9731 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9732 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9733 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9734 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9735 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
9736 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9737 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9738 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9739 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9740 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
9741 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9742 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9743 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9744 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9745 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
9746 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9747 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9748 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9749 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9750 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
9751 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9752 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9753 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9754 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9755 IWX_CFG_ANY,
9756 iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
9757 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9758 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9759 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9760 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9761 IWX_CFG_ANY,
9762 iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
9763
9764 /* Qu with Hr, B step */
9765 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9766 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9767 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9768 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9769 iwx_qu_b0_hr1_b0), /* AX101 */
9770 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9771 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9772 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9773 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9774 iwx_qu_b0_hr_b0), /* AX203 */
9775
9776 /* Qu with Hr, C step */
9777 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9778 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9779 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9780 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9781 iwx_qu_c0_hr1_b0), /* AX101 */
9782 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9783 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9784 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9785 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9786 iwx_qu_c0_hr_b0), /* AX203 */
9787 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9788 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9789 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9790 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9791 iwx_qu_c0_hr_b0), /* AX201 */
9792
9793 /* QuZ with Hr */
9794 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9795 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9796 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9797 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9798 iwx_quz_a0_hr1_b0), /* AX101 */
9799 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9800 IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
9801 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9802 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9803 iwx_cfg_quz_a0_hr_b0), /* AX203 */
9804
9805 /* SoF with JF2 */
9806 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9807 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9808 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9809 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9810 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
9811 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9812 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9813 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9814 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9815 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
9816
9817 /* SoF with JF */
9818 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9819 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9820 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9821 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9822 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
9823 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9824 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9825 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9826 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9827 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
9828 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9829 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9830 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9831 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9832 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
9833 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9834 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9835 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9836 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9837 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
9838
9839 /* So with Hr */
9840 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9841 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9842 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9843 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9844 iwx_cfg_so_a0_hr_b0), /* AX203 */
9845 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9846 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9847 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9848 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9849 iwx_cfg_so_a0_hr_b0), /* ax101 */
9850 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9851 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9852 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9853 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9854 iwx_cfg_so_a0_hr_b0), /* ax201 */
9855
9856 /* So-F with Hr */
9857 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9858 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9859 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9860 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9861 iwx_cfg_so_a0_hr_b0), /* AX203 */
9862 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9863 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9864 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9865 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9866 iwx_cfg_so_a0_hr_b0), /* AX101 */
9867 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9868 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9869 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9870 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9871 iwx_cfg_so_a0_hr_b0), /* AX201 */
9872
9873 /* So-F with GF */
9874 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9875 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9876 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9877 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9878 iwx_2ax_cfg_so_gf_a0), /* AX211 */
9879 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9880 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9881 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9882 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
9883 iwx_2ax_cfg_so_gf4_a0), /* AX411 */
9884
9885 /* So with GF */
9886 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9887 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9888 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9889 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9890 iwx_2ax_cfg_so_gf_a0), /* AX211 */
9891 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9892 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9893 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9894 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
9895 iwx_2ax_cfg_so_gf4_a0), /* AX411 */
9896
9897 /* So with JF2 */
9898 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9899 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9900 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9901 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9902 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
9903 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9904 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9905 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9906 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9907 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
9908
9909 /* So with JF */
9910 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9911 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9912 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9913 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9914 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
9915 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9916 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9917 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9918 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9919 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
9920 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9921 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9922 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9923 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9924 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
9925 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9926 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9927 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9928 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9929 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
9930 };
9931
9932 static int
9933 iwx_preinit(struct iwx_softc *sc)
9934 {
9935 struct ieee80211com *ic = &sc->sc_ic;
9936 int err;
9937
9938 err = iwx_prepare_card_hw(sc);
9939 if (err) {
9940 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9941 return err;
9942 }
9943
9944 if (sc->attached) {
9945 return 0;
9946 }
9947
9948 err = iwx_start_hw(sc);
9949 if (err) {
9950 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9951 return err;
9952 }
9953
9954 err = iwx_run_init_mvm_ucode(sc, 1);
9955 iwx_stop_device(sc);
9956 if (err) {
9957 printf("%s: failed to stop device\n", DEVNAME(sc));
9958 return err;
9959 }
9960
9961 /* Print version info and MAC address on first successful fw load. */
9962 sc->attached = 1;
9963 if (sc->sc_pnvm_ver) {
9964 printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
9965 "address %s\n",
9966 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9967 sc->sc_fwver, sc->sc_pnvm_ver,
9968 ether_sprintf(sc->sc_nvm.hw_addr));
9969 } else {
9970 printf("%s: hw rev 0x%x, fw %s, address %s\n",
9971 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9972 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9973 }
9974
9975 /* not all hardware can do 5GHz band */
9976 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9977 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9978 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9979
9980 return 0;
9981 }
9982
/*
 * Deferred attach hook, run by config_intrhook once interrupts are
 * enabled.  Completes firmware preinit (which needs interrupts), then
 * performs the net80211 attachment: channel map, com methods, A-MPDU
 * hooks, radiotap, and the device announcement.
 *
 * On preinit failure the net80211 attach is skipped entirely; only the
 * intrhook is disestablished.
 */
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	ieee80211_ifattach(ic);
	/* Driver method overrides; installed after ieee80211_ifattach(). */
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/* Save net80211's A-MPDU RX methods and interpose our own. */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	/* Likewise for the TX block-ack negotiation methods. */
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10031
10032 const struct iwx_device_cfg *
10033 iwx_find_device_cfg(struct iwx_softc *sc)
10034 {
10035 uint16_t sdev_id, mac_type, rf_type;
10036 uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10037 int i;
10038
10039 sdev_id = pci_get_subdevice(sc->sc_dev);
10040 mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10041 mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10042 rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10043 cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10044 jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10045
10046 rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10047 no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10048 cores = IWX_SUBDEVICE_CORES(sdev_id);
10049
10050 for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10051 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10052
10053 if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10054 dev_info->device != sc->sc_pid)
10055 continue;
10056
10057 if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10058 dev_info->subdevice != sdev_id)
10059 continue;
10060
10061 if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10062 dev_info->mac_type != mac_type)
10063 continue;
10064
10065 if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10066 dev_info->mac_step != mac_step)
10067 continue;
10068
10069 if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10070 dev_info->rf_type != rf_type)
10071 continue;
10072
10073 if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10074 dev_info->cdb != cdb)
10075 continue;
10076
10077 if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10078 dev_info->jacket != jacket)
10079 continue;
10080
10081 if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10082 dev_info->rf_id != rf_id)
10083 continue;
10084
10085 if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10086 dev_info->no_160 != no_160)
10087 continue;
10088
10089 if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10090 dev_info->cores != cores)
10091 continue;
10092
10093 return dev_info->cfg;
10094 }
10095
10096 return NULL;
10097 }
10098
10099 static int
10100 iwx_probe(device_t dev)
10101 {
10102 int i;
10103
10104 for (i = 0; i < nitems(iwx_devices); i++) {
10105 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10106 pci_get_device(dev) == iwx_devices[i].device) {
10107 device_set_desc(dev, iwx_devices[i].name);
10108
10109 /*
10110 * Due to significant existing deployments using
10111 * iwlwifi lower the priority of iwx.
10112 *
10113 * This inverts the advice in bus.h where drivers
10114 * supporting newer hardware should return
10115 * BUS_PROBE_DEFAULT and drivers for older devices
10116 * return BUS_PROBE_LOW_PRIORITY.
10117 *
10118 */
10119 return (BUS_PROBE_LOW_PRIORITY);
10120 }
10121 }
10122
10123 return (ENXIO);
10124 }
10125
/*
 * Device attach: set up locks and taskqueue, map the PCI BAR, allocate
 * the MSI-X interrupt, identify the exact adapter variant (selecting
 * firmware and PNVM images), allocate DMA memory and TX/RX rings, set
 * net80211 capabilities, and finally defer firmware preinit and
 * net80211 attachment to iwx_attach_hook() via config_intrhook.
 *
 * Returns 0 on success or ENXIO on failure.
 *
 * NOTE(review): several early "return (ENXIO)" paths below (PCIe cap,
 * MSI-X, BAR, IRQ failures) exit without freeing the taskqueue, BAR
 * mapping or IRQ acquired earlier — confirm whether detach still runs
 * in those cases or whether these paths leak resources.
 */
static int
iwx_attach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwx_device_cfg *cfg;
	int err;
	int txq_i, i, j;
	size_t ctxt_info_size;
	int rid;
	int count;
	int error;
	sc->sc_dev = dev;
	sc->sc_pid = pci_get_device(dev);
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	/* Tasks, lock, and the pending-send queue. */
	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
	IWX_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		return (ENXIO);
	}

	/* The device is PCIe; the capability offset is needed later. */
	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (sc->sc_cap_off == 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (ENXIO);
	}

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Only MSI-X operation is supported by this driver. */
	if (pci_msix_count(dev)) {
		sc->sc_msix = 1;
	} else {
		device_printf(dev, "no MSI-X found\n");
		return (ENXIO);
	}

	/* Map BAR 0 (device registers). */
	pci_enable_busmaster(dev);
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Allocate a single MSI-X vector; rid 0 falls back to INTx. */
	count = 1;
	rid = 0;
	if (pci_alloc_msix(dev, &count) == 0)
		rid = 1;
	DPRINTF(("%s: count=%d\n", __func__, count));
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
	}

	/* Clear pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);

	/* Read hardware revision and RF ID used for variant detection. */
	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
	DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
	    (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	/*
	 * Per-device defaults: firmware image, device family, SoC
	 * integration, LTR delay and crystal latency.  These may be
	 * refined further by iwx_find_device_cfg() below.
	 */
	switch (sc->sc_pid) {
	case PCI_PRODUCT_INTEL_WL_22500_1:
		sc->sc_fwname = IWX_CC_A_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_2:
	case PCI_PRODUCT_INTEL_WL_22500_5:
		/* These devices should be QuZ only. */
		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
			device_printf(dev, "unsupported AX201 adapter\n");
			return (ENXIO);
		}
		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_3:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_4:
	case PCI_PRODUCT_INTEL_WL_22500_7:
	case PCI_PRODUCT_INTEL_WL_22500_8:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 1820;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_6:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_9:
	case PCI_PRODUCT_INTEL_WL_22500_10:
	case PCI_PRODUCT_INTEL_WL_22500_11:
	case PCI_PRODUCT_INTEL_WL_22500_13:
	/* _14 is an MA device, not yet supported */
	case PCI_PRODUCT_INTEL_WL_22500_15:
	case PCI_PRODUCT_INTEL_WL_22500_16:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_12:
	case PCI_PRODUCT_INTEL_WL_22500_17:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		sc->sc_imr_enabled = 1;
		break;
	default:
		device_printf(dev, "unknown adapter type\n");
		return (ENXIO);
	}

	/* A table match overrides the per-product defaults above. */
	cfg = iwx_find_device_cfg(sc);
	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
	if (cfg) {
		sc->sc_fwname = cfg->fw_name;
		sc->sc_pnvm_name = cfg->pnvm_name;
		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
		sc->sc_uhb_supported = cfg->uhb_supported;
		if (cfg->xtal_latency) {
			sc->sc_xtal_latency = cfg->xtal_latency;
			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
		}
	}

	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		sc->sc_umac_prph_offset = 0x300000;
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
	} else
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;

	/* Allocate DMA memory for loading firmware. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
	else
		ctxt_info_size = sizeof(struct iwx_context_info);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    ctxt_info_size, 1);
	if (err) {
		device_printf(dev,
		    "could not allocate memory for loading firmware\n");
		return (ENXIO);
	}

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
		    sizeof(struct iwx_prph_scratch), 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph scratch memory\n");
			goto fail1;
		}

		/*
		 * Allocate prph information. The driver doesn't use this.
		 * We use the second half of this page to give the device
		 * some dummy TR/CR tail pointers - which shouldn't be
		 * necessary as we don't use this, but the hardware still
		 * reads/writes there and we can't let it go do that with
		 * a NULL pointer.
		 */
		KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
		    ("iwx_prph_info has wrong size"));
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
		    PAGE_SIZE, 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph info memory\n");
			goto fail1;
		}
	}

	/* Allocate interrupt cause table (ICT).*/
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
	if (err) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail1;
	}

	/* TX rings; partially allocated rings are unwound at fail4. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			device_printf(dev, "could not allocate TX ring %d\n",
			    txq_i);
			goto fail4;
		}
	}

	err = iwx_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		device_printf(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

#ifdef IWX_DEBUG
	/* Debug sysctls: debug bitmask, queue watermarks, queue depths. */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
	    CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
	    CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
	    CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
	    CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
	    CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
	    CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
	    CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
	    CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
	    CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
	    CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
	    CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
#endif
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_MONITOR |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	/* Enable seqno offload */
	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;

	/* HT capabilities: 2x2, 20/40 MHz, short GI, TX A-MPDU. */
	ic->ic_txstream = 2;
	ic->ic_rxstream = 2;
	ic->ic_htcaps |= IEEE80211_HTC_HT
	    | IEEE80211_HTCAP_SMPS_OFF
	    | IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
	    | IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
	    | IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
	    | IEEE80211_HTC_AMPDU	/* tx A-MPDU */
//	    | IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
	    | IEEE80211_HTCAP_MAXAMSDU_3839;	/* max A-MSDU length */

	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;

	/*
	 * XXX: setupcurchan() expects vhtcaps to be non-zero
	 * https://bugs.freebsd.org/274156
	 */
	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
	    | IEEE80211_VHTCAP_SHORT_GI_80
	    | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
	    | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
	    | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;

	/* VHT MCS map: MCS 0-9 on the two supported spatial streams. */
	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
	    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);

	/* Watchdog and RX block-ack reorder state. */
	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		for (j = 0; j < nitems(rxba->entries); j++)
			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
	}

	/* Defer firmware preinit until interrupts are enabled. */
	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev,
		    "config_intrhook_establish failed\n");
		goto fail4;
	}

	return (0);

fail4:
	while (--txq_i >= 0)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);
	if (sc->ict_dma.vaddr != NULL)
		iwx_dma_contig_free(&sc->ict_dma);

fail1:
	/*
	 * NOTE(review): the prph frees below are reached even on pre-AX210
	 * devices where those buffers were never allocated — presumably
	 * iwx_dma_contig_free() tolerates an unallocated descriptor;
	 * confirm.
	 */
	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_dma_contig_free(&sc->prph_scratch_dma);
	iwx_dma_contig_free(&sc->prph_info_dma);
	return (ENXIO);
}
10545
/*
 * Device detach: stop the hardware, drain deferred work, detach
 * net80211, then release rings, firmware images, interrupt and bus
 * resources in reverse order of acquisition.
 *
 * Always returns 0.
 */
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	/* Quiesce the device before tearing anything down. */
	iwx_stop_device(sc);

	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Release firmware and PNVM images if they were loaded. */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	/* Interrupt, MSI vectors, and the register BAR. */
	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10589
10590 static void
10591 iwx_radiotap_attach(struct iwx_softc *sc)
10592 {
10593 struct ieee80211com *ic = &sc->sc_ic;
10594
10595 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10596 "->%s begin\n", __func__);
10597
10598 ieee80211_radiotap_attach(ic,
10599 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10600 IWX_TX_RADIOTAP_PRESENT,
10601 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10602 IWX_RX_RADIOTAP_PRESENT);
10603
10604 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10605 "->%s end\n", __func__);
10606 }
10607
10608 struct ieee80211vap *
10609 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10610 enum ieee80211_opmode opmode, int flags,
10611 const uint8_t bssid[IEEE80211_ADDR_LEN],
10612 const uint8_t mac[IEEE80211_ADDR_LEN])
10613 {
10614 struct iwx_vap *ivp;
10615 struct ieee80211vap *vap;
10616
10617 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
10618 return NULL;
10619 ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10620 vap = &ivp->iv_vap;
10621 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10622 vap->iv_bmissthreshold = 10; /* override default */
10623 /* Override with driver methods. */
10624 ivp->iv_newstate = vap->iv_newstate;
10625 vap->iv_newstate = iwx_newstate;
10626
10627 ivp->id = IWX_DEFAULT_MACID;
10628 ivp->color = IWX_DEFAULT_COLOR;
10629
10630 ivp->have_wme = TRUE;
10631 ivp->ps_disabled = FALSE;
10632
10633 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10634 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10635
10636 /* h/w crypto support */
10637 vap->iv_key_alloc = iwx_key_alloc;
10638 vap->iv_key_delete = iwx_key_delete;
10639 vap->iv_key_set = iwx_key_set;
10640 vap->iv_key_update_begin = iwx_key_update_begin;
10641 vap->iv_key_update_end = iwx_key_update_end;
10642
10643 ieee80211_ratectl_init(vap);
10644 /* Complete setup. */
10645 ieee80211_vap_attach(vap, ieee80211_media_change,
10646 ieee80211_media_status, mac);
10647 ic->ic_opmode = opmode;
10648
10649 return vap;
10650 }
10651
/*
 * Tear down a vap in the reverse order of iwx_vap_create():
 * rate control, net80211 detach, then the allocation itself.
 */
static void
iwx_vap_delete(struct ieee80211vap *vap)
{
	struct iwx_vap *ivp = IWX_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}
10661
10662 static void
10663 iwx_parent(struct ieee80211com *ic)
10664 {
10665 struct iwx_softc *sc = ic->ic_softc;
10666 IWX_LOCK(sc);
10667
10668 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10669 iwx_stop(sc);
10670 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10671 } else {
10672 iwx_init(sc);
10673 ieee80211_start_all(ic);
10674 }
10675 IWX_UNLOCK(sc);
10676 }
10677
10678 static int
10679 iwx_suspend(device_t dev)
10680 {
10681 struct iwx_softc *sc = device_get_softc(dev);
10682 struct ieee80211com *ic = &sc->sc_ic;
10683
10684 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10685 ieee80211_suspend_all(ic);
10686
10687 iwx_stop(sc);
10688 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10689 }
10690 return (0);
10691 }
10692
10693 static int
10694 iwx_resume(device_t dev)
10695 {
10696 struct iwx_softc *sc = device_get_softc(dev);
10697 struct ieee80211com *ic = &sc->sc_ic;
10698 int err;
10699
10700 /*
10701 * We disable the RETRY_TIMEOUT register (0x41) to keep
10702 * PCI Tx retries from interfering with C3 CPU state.
10703 */
10704 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10705
10706 IWX_LOCK(sc);
10707
10708 err = iwx_init(sc);
10709 if (err) {
10710 iwx_stop_device(sc);
10711 IWX_UNLOCK(sc);
10712 return err;
10713 }
10714
10715 IWX_UNLOCK(sc);
10716
10717 ieee80211_resume_all(ic);
10718 return (0);
10719 }
10720
10721 static void
10722 iwx_scan_start(struct ieee80211com *ic)
10723 {
10724 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10725 struct iwx_softc *sc = ic->ic_softc;
10726 int err;
10727
10728 IWX_LOCK(sc);
10729 if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10730 err = iwx_scan(sc);
10731 else
10732 err = iwx_bgscan(ic);
10733 IWX_UNLOCK(sc);
10734 if (err)
10735 ieee80211_cancel_scan(vap);
10736
10737 return;
10738 }
10739
/* Multicast filter updates are not implemented; intentionally a no-op. */
static void
iwx_update_mcast(struct ieee80211com *ic)
{
}
10744
/*
 * Per-channel scan hook: a no-op because the firmware drives channel
 * switching during scans, not net80211.
 */
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
10749
/*
 * Minimum-dwell scan hook: a no-op because dwell time is controlled
 * by the firmware scan command, not by net80211.
 */
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
10754
/* net80211 scan-end callback: finish the firmware scan in the driver. */
static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}
10760
/*
 * net80211 channel-change callback.  Currently a no-op: the disabled
 * code below suggests channel changes should eventually go through
 * the PHY context task, but that path is not wired up yet.
 */
static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}
10772
10773 static void
10774 iwx_endscan_cb(void *arg, int pending)
10775 {
10776 struct iwx_softc *sc = arg;
10777 struct ieee80211com *ic = &sc->sc_ic;
10778
10779 DPRINTF(("scan ended\n"));
10780 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10781 }
10782
/*
 * WME parameter update hook: a no-op for now; returning 0 reports
 * success so net80211 proceeds without error.
 */
static int
iwx_wme_update(struct ieee80211com *ic)
{
	return 0;
}
10788
10789 static int
10790 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10791 const struct ieee80211_bpf_params *params)
10792 {
10793 struct ieee80211com *ic = ni->ni_ic;
10794 struct iwx_softc *sc = ic->ic_softc;
10795 int err;
10796
10797 IWX_LOCK(sc);
10798 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10799 err = iwx_tx(sc, m, ni);
10800 IWX_UNLOCK(sc);
10801 return err;
10802 } else {
10803 IWX_UNLOCK(sc);
10804 return EIO;
10805 }
10806 }
10807
10808 static int
10809 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10810 {
10811 struct iwx_softc *sc = ic->ic_softc;
10812 int error;
10813
10814 // TODO: mbufq_enqueue in iwm
10815 // TODO dequeue in iwm_start, counters, locking
10816 IWX_LOCK(sc);
10817 error = mbufq_enqueue(&sc->sc_snd, m);
10818 if (error) {
10819 IWX_UNLOCK(sc);
10820 return (error);
10821 }
10822
10823 iwx_start(sc);
10824 IWX_UNLOCK(sc);
10825 return (0);
10826 }
10827
10828 static int
10829 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10830 int baparamset, int batimeout, int baseqctl)
10831 {
10832 struct ieee80211com *ic = ni->ni_ic;
10833 struct iwx_softc *sc = ic->ic_softc;
10834 int tid;
10835
10836 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10837 sc->ni_rx_ba[tid].ba_winstart =
10838 _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10839 sc->ni_rx_ba[tid].ba_winsize =
10840 _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10841 sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10842
10843 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10844 tid >= IWX_MAX_TID_COUNT)
10845 return ENOSPC;
10846
10847 if (sc->ba_rx.start_tidmask & (1 << tid)) {
10848 DPRINTF(("%s: tid %d already added\n", __func__, tid));
10849 return EBUSY;
10850 }
10851 DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10852
10853 sc->ba_rx.start_tidmask |= (1 << tid);
10854 DPRINTF(("%s: tid=%i\n", __func__, tid));
10855 DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10856 DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10857 DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10858
10859 taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10860
10861 // TODO:misha move to ba_task (serialize)
10862 sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10863
10864 return (0);
10865 }
10866
/* Rx A-MPDU stop hook: firmware teardown not implemented; no-op. */
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	return;
}
10872
/*
 * net80211 Tx ADDBA request hook: mark the TID as pending and defer
 * the firmware Tx-aggregation setup to the ba_tx task.
 */
static int
iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct iwx_softc *sc = ni->ni_ic->ic_softc;
	int tid;

	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
	DPRINTF(("%s: tid=%i\n", __func__, tid));
	sc->ba_tx.start_tidmask |= (1 << tid);
	taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
	return 0;
}
10886
10887
/* Tx ADDBA response hook: nothing to do in the driver; reports 0. */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return 0;
}
10894
/* Key-update begin hook: no driver-side serialization needed; no-op. */
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	return;
}
10900
/* Key-update end hook: counterpart of iwx_key_update_begin(); no-op. */
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	return;
}
10906
10907 static int
10908 iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
10909 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
10910 {
10911
10912 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
10913 return 1;
10914 }
10915 if (!(&vap->iv_nw_keys[0] <= k &&
10916 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
10917 /*
10918 * Not in the global key table, the driver should handle this
10919 * by allocating a slot in the h/w key table/cache. In
10920 * lieu of that return key slot 0 for any unicast key
10921 * request. We disallow the request if this is a group key.
10922 * This default policy does the right thing for legacy hardware
10923 * with a 4 key table. It also handles devices that pass
10924 * packets through untouched when marked with the WEP bit
10925 * and key index 0.
10926 */
10927 if (k->wk_flags & IEEE80211_KEY_GROUP)
10928 return 0;
10929 *keyix = 0; /* NB: use key index 0 for ucast key */
10930 } else {
10931 *keyix = ieee80211_crypto_get_key_wepidx(vap, k);
10932 }
10933 *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
10934 return 1;
10935 }
10936
10937 static int
10938 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
10939 {
10940 struct ieee80211com *ic = vap->iv_ic;
10941 struct iwx_softc *sc = ic->ic_softc;
10942 struct iwx_add_sta_key_cmd cmd;
10943 uint32_t status;
10944 int err;
10945 int id;
10946
10947 if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
10948 return 1;
10949 }
10950
10951 IWX_LOCK(sc);
10952 /*
10953 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
10954 * Currently we only implement station mode where 'ni' is always
10955 * ic->ic_bss so there is no need to validate arguments beyond this:
10956 */
10957
10958 memset(&cmd, 0, sizeof(cmd));
10959
10960 if (k->wk_flags & IEEE80211_KEY_GROUP) {
10961 DPRINTF(("%s: adding group key\n", __func__));
10962 } else {
10963 DPRINTF(("%s: adding key\n", __func__));
10964 }
10965 if (k >= &vap->iv_nw_keys[0] &&
10966 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
10967 id = (k - vap->iv_nw_keys);
10968 else
10969 id = (0);
10970 DPRINTF(("%s: setting keyid=%i\n", __func__, id));
10971 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
10972 IWX_STA_KEY_FLG_WEP_KEY_MAP |
10973 ((id << IWX_STA_KEY_FLG_KEYID_POS) &
10974 IWX_STA_KEY_FLG_KEYID_MSK));
10975 if (k->wk_flags & IEEE80211_KEY_GROUP) {
10976 cmd.common.key_offset = 1;
10977 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
10978 } else {
10979 cmd.common.key_offset = 0;
10980 }
10981 memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
10982 k->wk_keylen));
10983 DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
10984 for (int i=0; i<k->wk_keylen; i++) {
10985 DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
10986 }
10987 cmd.common.sta_id = IWX_STATION_ID;
10988
10989 cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
10990 DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));
10991
10992 status = IWX_ADD_STA_SUCCESS;
10993 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
10994 &status);
10995 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
10996 err = EIO;
10997 if (err) {
10998 printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
10999 IWX_UNLOCK(sc);
11000 return err;
11001 } else
11002 DPRINTF(("%s: key added successfully\n", __func__));
11003 IWX_UNLOCK(sc);
11004 return 1;
11005 }
11006
/*
 * Key-delete hook: hardware key removal is not implemented;
 * returning 1 tells net80211 the key was handled.
 */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return 1;
}
11012
/* newbus method table wiring the driver into the device framework. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwx_probe),
	DEVMETHOD(device_attach,	iwx_attach),
	DEVMETHOD(device_detach,	iwx_detach),
	DEVMETHOD(device_suspend,	iwx_suspend),
	DEVMETHOD(device_resume,	iwx_resume),

	DEVMETHOD_END
};
11023
/* Driver declaration: name, method table and per-device softc size. */
static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};
11029
/* Register the driver on the PCI bus. */
DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
/* PNP table so devmatch(8) can autoload the module for matching devices. */
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
/* Module dependencies: firmware loader, PCI bus and the wlan stack. */
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11036