xref: /freebsd/sys/dev/iwx/if_iwx.c (revision 92f340d137ba5d6db7610ba1dae35842e2c9c8ea)
1 /*-
2  * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3  */
4 
5 /*	$OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $	*/
6 
7 /*
8  *
9  * Copyright (c) 2025 The FreeBSD Foundation
10  *
11  * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12  * under sponsorship from the FreeBSD Foundation.
13  *
14  * Permission to use, copy, modify, and distribute this software for any
15  * purpose with or without fee is hereby granted, provided that the above
16  * copyright notice and this permission notice appear in all copies.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25  *
26  */
27 
28 /*-
29  * Copyright (c) 2024 Future Crew, LLC
30  *   Author: Mikhail Pchelin <misha@FreeBSD.org>
31  *
32  * Permission to use, copy, modify, and distribute this software for any
33  * purpose with or without fee is hereby granted, provided that the above
34  * copyright notice and this permission notice appear in all copies.
35  *
36  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43  */
44 
45 /*
46  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47  *   Author: Stefan Sperling <stsp@openbsd.org>
48  * Copyright (c) 2014 Fixup Software Ltd.
49  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50  *
51  * Permission to use, copy, modify, and distribute this software for any
52  * purpose with or without fee is hereby granted, provided that the above
53  * copyright notice and this permission notice appear in all copies.
54  *
55  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62  */
63 
64 /*-
65  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66  * which were used as the reference documentation for this implementation.
67  *
68  ******************************************************************************
69  *
70  * This file is provided under a dual BSD/GPLv2 license.  When using or
71  * redistributing this file, you may do so under either license.
72  *
73  * GPL LICENSE SUMMARY
74  *
75  * Copyright(c) 2017 Intel Deutschland GmbH
76  * Copyright(c) 2018 - 2019 Intel Corporation
77  *
78  * This program is free software; you can redistribute it and/or modify
79  * it under the terms of version 2 of the GNU General Public License as
80  * published by the Free Software Foundation.
81  *
82  * This program is distributed in the hope that it will be useful, but
83  * WITHOUT ANY WARRANTY; without even the implied warranty of
84  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
85  * General Public License for more details.
86  *
87  * BSD LICENSE
88  *
89  * Copyright(c) 2017 Intel Deutschland GmbH
90  * Copyright(c) 2018 - 2019 Intel Corporation
91  * All rights reserved.
92  *
93  * Redistribution and use in source and binary forms, with or without
94  * modification, are permitted provided that the following conditions
95  * are met:
96  *
97  *  * Redistributions of source code must retain the above copyright
98  *    notice, this list of conditions and the following disclaimer.
99  *  * Redistributions in binary form must reproduce the above copyright
100  *    notice, this list of conditions and the following disclaimer in
101  *    the documentation and/or other materials provided with the
102  *    distribution.
103  *  * Neither the name Intel Corporation nor the names of its
104  *    contributors may be used to endorse or promote products derived
105  *    from this software without specific prior written permission.
106  *
107  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118  *
119  *****************************************************************************
120  */
121 
122 /*-
123  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124  *
125  * Permission to use, copy, modify, and distribute this software for any
126  * purpose with or without fee is hereby granted, provided that the above
127  * copyright notice and this permission notice appear in all copies.
128  *
129  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136  */
137 
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157 
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161 
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164 
165 #include <net/bpf.h>
166 
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171 
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174 
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180 
/*
 * Tx queue fill watermarks -- presumably frame/descriptor counts used to
 * throttle the transmit path; TODO confirm units against the tx ring code.
 */
int iwx_himark = 224;
int iwx_lomark = 192;
183 
/* Firmware response structure versions handled by this FreeBSD port. */
#define IWX_FBSD_RSP_V3 3
#define IWX_FBSD_RSP_V4 4

/* Device name for log messages, e.g. "iwx0". */
#define DEVNAME(_sc)    (device_get_nameunit((_sc)->sc_dev))
/* ifnet of the first (and only) vap attached to this ieee80211com. */
#define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)

/*
 * Linux-style dereferencing little-endian loads.  NOTE(review): these
 * cast-and-load, so the pointer is assumed suitably aligned for a
 * 16/32-bit access -- confirm at call sites.
 */
#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192 
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195 
196 #include <dev/iwx/if_iwx_debug.h>
197 
198 #define PCI_VENDOR_INTEL		0x8086
199 #define	PCI_PRODUCT_INTEL_WL_22500_1	0x2723		/* Wi-Fi 6 AX200 */
200 #define	PCI_PRODUCT_INTEL_WL_22500_2	0x02f0		/* Wi-Fi 6 AX201 */
201 #define	PCI_PRODUCT_INTEL_WL_22500_3	0xa0f0		/* Wi-Fi 6 AX201 */
202 #define	PCI_PRODUCT_INTEL_WL_22500_4	0x34f0		/* Wi-Fi 6 AX201 */
203 #define	PCI_PRODUCT_INTEL_WL_22500_5	0x06f0		/* Wi-Fi 6 AX201 */
204 #define	PCI_PRODUCT_INTEL_WL_22500_6	0x43f0		/* Wi-Fi 6 AX201 */
205 #define	PCI_PRODUCT_INTEL_WL_22500_7	0x3df0		/* Wi-Fi 6 AX201 */
206 #define	PCI_PRODUCT_INTEL_WL_22500_8	0x4df0		/* Wi-Fi 6 AX201 */
207 #define	PCI_PRODUCT_INTEL_WL_22500_9	0x2725		/* Wi-Fi 6 AX210 */
208 #define	PCI_PRODUCT_INTEL_WL_22500_10	0x2726		/* Wi-Fi 6 AX211 */
209 #define	PCI_PRODUCT_INTEL_WL_22500_11	0x51f0		/* Wi-Fi 6 AX211 */
210 #define	PCI_PRODUCT_INTEL_WL_22500_12	0x7a70		/* Wi-Fi 6 AX211 */
211 #define	PCI_PRODUCT_INTEL_WL_22500_13	0x7af0		/* Wi-Fi 6 AX211 */
212 #define	PCI_PRODUCT_INTEL_WL_22500_14	0x7e40		/* Wi-Fi 6 AX210 */
213 #define	PCI_PRODUCT_INTEL_WL_22500_15	0x7f70		/* Wi-Fi 6 AX211 */
214 #define	PCI_PRODUCT_INTEL_WL_22500_16	0x54f0		/* Wi-Fi 6 AX211 */
215 #define	PCI_PRODUCT_INTEL_WL_22500_17	0x51f1		/* Wi-Fi 6 AX211 */
216 
/*
 * Table of supported PCI device IDs (vendor PCI_VENDOR_INTEL) and their
 * marketing names, used at probe time to match and describe adapters.
 */
static const struct iwx_devices {
	uint16_t		device;	/* PCI device ID */
	char			*name;	/* human-readable adapter name */
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1,		"Wi-Fi 6 AX200"	},
	{ PCI_PRODUCT_INTEL_WL_22500_2,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_3,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_4,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_5,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_6,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_7,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_8,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_9,		"Wi-Fi 6 AX210"	},
	{ PCI_PRODUCT_INTEL_WL_22500_10,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_11,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_12,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_13,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_14,	"Wi-Fi 6 AX210"	},
	{ PCI_PRODUCT_INTEL_WL_22500_15,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_16,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_17,	"Wi-Fi 6 AX211"	},
};
239 
/*
 * Channel numbers for non-UHB parts: the first IWX_NUM_2GHZ_CHANNELS (14)
 * entries are 2.4 GHz channels, the remainder 5 GHz -- the split indices
 * below rely on this ordering.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
248 
/*
 * Channel numbers for ultra-high-band (6 GHz capable) parts: same 2.4 and
 * 5 GHz layout as iwx_nvm_channels_8000, followed by the 6-7 GHz channels.
 * Note the 6 GHz numbers restart at 1, so position in this array (not the
 * value) determines the band.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
262 
263 #define IWX_NUM_2GHZ_CHANNELS	14
264 #define IWX_NUM_5GHZ_CHANNELS	37
265 
/*
 * Rate table mapping on-air bit rates to firmware PLCP codes.
 * 'rate' is the legacy rate in units of 500 kb/s (2 == 1 Mb/s),
 * 'plcp' the legacy PLCP value (IWX_RATE_INVM_PLCP for HT-only rows),
 * and 'ht_plcp' the HT MCS PLCP value (IWX_RATE_HT_SISO_MCS_INV_PLCP
 * where no HT equivalent exists).  Entries are sorted by 'rate'.
 */
const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
294 #define IWX_RIDX_CCK	0
295 #define IWX_RIDX_OFDM	4
296 #define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
297 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
298 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
299 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
300 
/*
 * Convert an MCS index into an iwx_rates[] index.
 * Covers MCS 0-15, i.e. up to two spatial streams.
 */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
320 
321 static uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
322 static uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
323 static int	iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
324 #if 0
325 static int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
326 static int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
327 #endif
328 static int	iwx_apply_debug_destination(struct iwx_softc *);
329 static void	iwx_set_ltr(struct iwx_softc *);
330 static int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
331 static int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
332 	    const struct iwx_fw_sects *);
333 static void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
334 static void	iwx_ctxt_info_free_paging(struct iwx_softc *);
335 static int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
336 	    struct iwx_context_info_dram *);
337 static void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
338 static int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
339 	    const uint8_t *, size_t);
340 static int	iwx_set_default_calib(struct iwx_softc *, const void *);
341 static void	iwx_fw_info_free(struct iwx_fw_info *);
342 static int	iwx_read_firmware(struct iwx_softc *);
343 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
344 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
345 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
346 static void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
347 static void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
348 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
349 static void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
351 static int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
352 static int	iwx_nic_lock(struct iwx_softc *);
353 static void	iwx_nic_assert_locked(struct iwx_softc *);
354 static void	iwx_nic_unlock(struct iwx_softc *);
355 static int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
356 	    uint32_t);
357 static int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
358 static int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
359 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
360 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
361     bus_size_t, bus_size_t);
362 static void iwx_dma_contig_free(struct iwx_dma_info *);
363 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
364 static void	iwx_disable_rx_dma(struct iwx_softc *);
365 static void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
367 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
368 static void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
369 static void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
370 static void	iwx_enable_rfkill_int(struct iwx_softc *);
371 static int	iwx_check_rfkill(struct iwx_softc *);
372 static void	iwx_enable_interrupts(struct iwx_softc *);
373 static void	iwx_enable_fwload_interrupt(struct iwx_softc *);
374 #if 0
375 static void	iwx_restore_interrupts(struct iwx_softc *);
376 #endif
377 static void	iwx_disable_interrupts(struct iwx_softc *);
378 static void	iwx_ict_reset(struct iwx_softc *);
379 static int	iwx_set_hw_ready(struct iwx_softc *);
380 static int	iwx_prepare_card_hw(struct iwx_softc *);
381 static int	iwx_force_power_gating(struct iwx_softc *);
382 static void	iwx_apm_config(struct iwx_softc *);
383 static int	iwx_apm_init(struct iwx_softc *);
384 static void	iwx_apm_stop(struct iwx_softc *);
385 static int	iwx_allow_mcast(struct iwx_softc *);
386 static void	iwx_init_msix_hw(struct iwx_softc *);
387 static void	iwx_conf_msix_hw(struct iwx_softc *, int);
388 static int	iwx_clear_persistence_bit(struct iwx_softc *);
389 static int	iwx_start_hw(struct iwx_softc *);
390 static void	iwx_stop_device(struct iwx_softc *);
391 static void	iwx_nic_config(struct iwx_softc *);
392 static int	iwx_nic_rx_init(struct iwx_softc *);
393 static int	iwx_nic_init(struct iwx_softc *);
394 static int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
395 static int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
396 static void	iwx_post_alive(struct iwx_softc *);
397 static int	iwx_schedule_session_protection(struct iwx_softc *,
398     struct iwx_node *, uint32_t);
399 static void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
400 static void	iwx_init_channel_map(struct ieee80211com *, int, int *,
401     struct ieee80211_channel[]);
402 static int	iwx_mimo_enabled(struct iwx_softc *);
403 static void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
404 	    uint16_t);
405 static void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
406 static void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
407 	    uint16_t, uint16_t, int, int);
408 static void	iwx_sta_tx_agg_start(struct iwx_softc *,
409     struct ieee80211_node *, uint8_t);
410 static void	iwx_ba_rx_task(void *, int);
411 static void	iwx_ba_tx_task(void *, int);
412 static void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
413 static int	iwx_is_valid_mac_addr(const uint8_t *);
414 static void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
415 static int	iwx_nvm_get(struct iwx_softc *);
416 static int	iwx_load_firmware(struct iwx_softc *);
417 static int	iwx_start_fw(struct iwx_softc *);
418 static int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
419 static int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
420 static void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
421 static int	iwx_load_pnvm(struct iwx_softc *);
422 static int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
423 static int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
424 static int	iwx_load_ucode_wait_alive(struct iwx_softc *);
425 static int	iwx_send_dqa_cmd(struct iwx_softc *);
426 static int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
427 static int	iwx_config_ltr(struct iwx_softc *);
428 static void 	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
429 static int 	iwx_rx_addbuf(struct iwx_softc *, int, int);
430 static int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
431 static void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
432     struct iwx_rx_data *);
433 static int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
434 static int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
435 #if 0
436 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
437 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
438 #endif
439 static void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
440     int, int, uint32_t, uint8_t);
441 static void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
442 static void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
443     struct iwx_tx_data *);
444 static void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
445 static void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
446 	    struct iwx_rx_data *);
447 static void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
448 static void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
449     struct iwx_rx_data *);
450 static int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
451 static uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
452 static int	iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
453     struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
454 #if 0
455 static int	iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
456     uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
457 #endif
458 static int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
459     uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
460 static int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
461 static int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
462 	    const void *);
463 static int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
464 	    uint32_t *);
465 static int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
466 	    const void *, uint32_t *);
467 static void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
468 static void	iwx_cmd_done(struct iwx_softc *, int, int, int);
469 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
470 static uint32_t iwx_fw_rateidx_cck(uint8_t);
471 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
472     struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
473     struct mbuf *);
474 static void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
475 	    uint16_t, uint16_t);
476 static int	iwx_tx(struct iwx_softc *, struct mbuf *,
477     struct ieee80211_node *);
478 static int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
479 static int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
480 static int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
481 static int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
482 	    struct iwx_beacon_filter_cmd *);
483 static int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
484     int);
485 static void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
486 	    struct iwx_mac_power_cmd *);
487 static int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
488 static int	iwx_power_update_device(struct iwx_softc *);
489 #if 0
490 static int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
491 #endif
492 static int	iwx_disable_beacon_filter(struct iwx_softc *);
493 static int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
494 static int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
495 static int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
496 static int	iwx_fill_probe_req(struct iwx_softc *,
497     struct iwx_scan_probe_req *);
498 static int	iwx_config_umac_scan_reduced(struct iwx_softc *);
499 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
500 static void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
501 	    struct iwx_scan_general_params_v10 *, int);
502 static void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
503 	    struct iwx_scan_general_params_v10 *, uint16_t, int);
504 static void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
505 	    struct iwx_scan_channel_params_v6 *, uint32_t, int);
506 static int	iwx_umac_scan_v14(struct iwx_softc *, int);
507 static void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
508 static uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
509 static int	iwx_rval2ridx(int);
510 static void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
511     int *);
512 static void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
513 	    struct iwx_mac_ctx_cmd *, uint32_t);
514 static void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
515 	    struct iwx_mac_data_sta *, int);
516 static int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
517     uint32_t, int);
518 static int	iwx_clear_statistics(struct iwx_softc *);
519 static int	iwx_scan(struct iwx_softc *);
520 static int	iwx_bgscan(struct ieee80211com *);
521 static int	iwx_enable_mgmt_queue(struct iwx_softc *);
522 static int	iwx_disable_mgmt_queue(struct iwx_softc *);
523 static int	iwx_rs_rval2idx(uint8_t);
524 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
525     int);
526 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
527 static int	iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
528 static int	iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
529 static int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
530 static int	iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
531 	    uint8_t, uint8_t);
532 static int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
533 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
534 	    uint8_t);
535 static int	iwx_auth(struct ieee80211vap *, struct iwx_softc *);
536 static int	iwx_deauth(struct iwx_softc *);
537 static int	iwx_run(struct ieee80211vap *, struct iwx_softc *);
538 static int	iwx_run_stop(struct iwx_softc *);
539 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
540     const uint8_t[IEEE80211_ADDR_LEN]);
541 #if 0
542 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
543 	    struct ieee80211_key *);
544 void	iwx_setkey_task(void *);
545 void	iwx_delete_key(struct ieee80211com *,
546 	    struct ieee80211_node *, struct ieee80211_key *);
547 #endif
548 static int	iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
549 static void	iwx_endscan(struct iwx_softc *);
550 static void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
551 	    struct ieee80211_node *);
552 static int	iwx_sf_config(struct iwx_softc *, int);
553 static int	iwx_send_bt_init_conf(struct iwx_softc *);
554 static int	iwx_send_soc_conf(struct iwx_softc *);
555 static int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
556 static int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
557 static int	iwx_init_hw(struct iwx_softc *);
558 static int	iwx_init(struct iwx_softc *);
559 static void	iwx_stop(struct iwx_softc *);
560 static void	iwx_watchdog(void *);
561 static const char *iwx_desc_lookup(uint32_t);
562 static void	iwx_nic_error(struct iwx_softc *);
563 static void	iwx_dump_driver_status(struct iwx_softc *);
564 static void	iwx_nic_umac_error(struct iwx_softc *);
565 static void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
566 static int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
567 static void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
568 	    struct mbuf *);
569 static void	iwx_notif_intr(struct iwx_softc *);
570 #if 0
571 /* XXX-THJ - I don't have hardware for this */
572 static int	iwx_intr(void *);
573 #endif
574 static void	iwx_intr_msix(void *);
575 static int	iwx_preinit(struct iwx_softc *);
576 static void	iwx_attach_hook(void *);
577 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
578 static int	iwx_probe(device_t);
579 static int	iwx_attach(device_t);
580 static int	iwx_detach(device_t);
581 
/* FreeBSD specific glue */
/* OpenBSD compat: Ethernet broadcast address. */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* OpenBSD compat: all-zero "any" address. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

/*
 * Debug printf, gated on the softc's sc_debug level; expects 'sc' in
 * scope at the call site.  NOTE(review): the gate uses '==' rather than
 * '&', so output appears only when sc_debug is exactly IWX_DEBUG_ANY --
 * confirm this is intended and not a missing bitwise test.
 */
#if IWX_DEBUG
#define DPRINTF(x)	do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
#else
#define DPRINTF(x)	do { ; } while (0)
#endif
594 
595 /* FreeBSD specific functions */
596 static struct	ieee80211vap * iwx_vap_create(struct ieee80211com *,
597     const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
598     const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
599 static void	iwx_vap_delete(struct ieee80211vap *);
600 static void	iwx_parent(struct ieee80211com *);
601 static void	iwx_scan_start(struct ieee80211com *);
602 static void	iwx_scan_end(struct ieee80211com *);
603 static void	iwx_update_mcast(struct ieee80211com *ic);
604 static void	iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
605 static void	iwx_scan_mindwell(struct ieee80211_scan_state *);
606 static void	iwx_set_channel(struct ieee80211com *);
607 static void	iwx_endscan_cb(void *, int );
608 static int	iwx_wme_update(struct ieee80211com *);
609 static int	iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
610     const struct ieee80211_bpf_params *);
611 static int	iwx_transmit(struct ieee80211com *, struct mbuf *);
612 static void	iwx_start(struct iwx_softc *);
613 static int	iwx_ampdu_rx_start(struct ieee80211_node *,
614     struct ieee80211_rx_ampdu *, int, int, int);
615 static void	iwx_ampdu_rx_stop(struct ieee80211_node *,
616     struct ieee80211_rx_ampdu *);
617 static int	iwx_addba_request(struct ieee80211_node *,
618     struct ieee80211_tx_ampdu *, int, int, int);
619 static int	iwx_addba_response(struct ieee80211_node *,
620     struct ieee80211_tx_ampdu *, int, int, int);
621 static void	iwx_key_update_begin(struct ieee80211vap *);
622 static void	iwx_key_update_end(struct ieee80211vap *);
623 static int	iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
624     ieee80211_keyix *,ieee80211_keyix *);
625 static int	iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
626 static int	iwx_key_delete(struct ieee80211vap *,
627     const struct ieee80211_key *);
628 static int	iwx_suspend(device_t);
629 static int	iwx_resume(device_t);
630 static void	iwx_radiotap_attach(struct iwx_softc *);
631 
/* OpenBSD compat defines */
#define IEEE80211_HTOP0_SCO_SCN 0
#define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
#define IEEE80211_VHTOP0_CHAN_WIDTH_80 1

#define IEEE80211_HT_RATESET_SISO 0
#define IEEE80211_HT_RATESET_MIMO2 2

/* Standard legacy rate sets; rates are in units of 500 kb/s (2 == 1 Mb/s). */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
648 
649 inline int
650 ieee80211_has_addr4(const struct ieee80211_frame *wh)
651 {
652 	return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
653 	    IEEE80211_FC1_DIR_DSTODS;
654 }
655 
656 static uint8_t
657 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
658 {
659 	const struct iwx_fw_cmd_version *entry;
660 	int i;
661 
662 	for (i = 0; i < sc->n_cmd_versions; i++) {
663 		entry = &sc->cmd_versions[i];
664 		if (entry->group == grp && entry->cmd == cmd)
665 			return entry->cmd_ver;
666 	}
667 
668 	return IWX_FW_CMD_VER_UNKNOWN;
669 }
670 
671 uint8_t
672 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
673 {
674 	const struct iwx_fw_cmd_version *entry;
675 	int i;
676 
677 	for (i = 0; i < sc->n_cmd_versions; i++) {
678 		entry = &sc->cmd_versions[i];
679 		if (entry->group == grp && entry->cmd == cmd)
680 			return entry->notif_ver;
681 	}
682 
683 	return IWX_FW_CMD_VER_UNKNOWN;
684 }
685 
/*
 * Parse the IWX_UCODE_TLV_CSCHEME section of the firmware file.
 *
 * Only validates that the TLV is large enough to hold the advertised
 * number of crypto-scheme entries; the schemes themselves are not
 * retained because the driver always uses software crypto.
 * Returns 0 on success, EINVAL if the TLV is truncated.
 */
static int
iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwx_fw_cscheme_list *l = (const void *)data;

	/*
	 * NOTE(review): l->size * sizeof(*l->cs) could overflow if l->size
	 * is wide and firmware-controlled — TODO confirm l->size's type
	 * bounds this product.
	 */
	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}
699 
700 static int
701 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
702     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
703 {
704 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
705 	if (err) {
706 		printf("%s: could not allocate context info DMA memory\n",
707 		    DEVNAME(sc));
708 		return err;
709 	}
710 
711 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
712 
713 	return 0;
714 }
715 
716 static void
717 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
718 {
719 	struct iwx_self_init_dram *dram = &sc->init_dram;
720 	int i;
721 
722 	if (!dram->paging)
723 		return;
724 
725 	/* free paging*/
726 	for (i = 0; i < dram->paging_cnt; i++)
727 		iwx_dma_contig_free(&dram->paging[i]);
728 
729 	free(dram->paging, M_DEVBUF);
730 	dram->paging_cnt = 0;
731 	dram->paging = NULL;
732 }
733 
734 static int
735 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
736 {
737 	int i = 0;
738 
739 	while (start < fws->fw_count &&
740 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
741 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
742 		start++;
743 		i++;
744 	}
745 
746 	return i;
747 }
748 
/*
 * Load all firmware sections (lmac, umac and paging) into DMA memory
 * and record their physical addresses in the device-visible context
 * info DRAM table.  Returns 0 or an errno; on error, any fw sections
 * already allocated are released by the caller via
 * iwx_ctxt_info_free_fw_img() (see iwx_ctxt_info_init() and
 * iwx_ctxt_info_gen3_init()).
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	/*
	 * Section layout in the firmware file: lmac sections, a separator,
	 * umac sections, another separator, then paging sections.
	 */
	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the driver lock across the allocations below. */
	IWX_UNLOCK(sc);
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	/*
	 * NOTE(review): this allocation uses M_WAITOK (which cannot return
	 * NULL) while the one above uses M_NOWAIT, so the NULL check below
	 * is effectively dead — confirm whether both should share a policy.
	 */
	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
						   &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
846 
/*
 * Render a firmware version triple into buf as "major.minor.api".
 * Starting with major version 35 the Linux driver prints the minor
 * version in hexadecimal; match that convention.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	const char *fmt = (major >= 35) ? "%u.%08x.%u" : "%u.%u.%u";

	snprintf(buf, bufsize, fmt, major, minor, api);
}
860 #if 0
861 static int
862 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
863     uint8_t min_power)
864 {
865 	struct iwx_dma_info *fw_mon = &sc->fw_mon;
866 	uint32_t size = 0;
867 	uint8_t power;
868 	int err;
869 
870 	if (fw_mon->size)
871 		return 0;
872 
873 	for (power = max_power; power >= min_power; power--) {
874 		size = (1 << power);
875 
876 		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
877 		if (err)
878 			continue;
879 
880 		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
881 		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
882 		    DEVNAME(sc), size);
883 		break;
884 	}
885 
886 	if (err) {
887 		fw_mon->size = 0;
888 		return err;
889 	}
890 
891 	if (power != max_power)
892 		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
893 		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
894 		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
895 		    (unsigned long)(1 << (max_power - 10)));
896 
897 	return 0;
898 }
899 
900 static int
901 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
902 {
903 	if (!max_power) {
904 		/* default max_power is maximum */
905 		max_power = 26;
906 	} else {
907 		max_power += 11;
908 	}
909 
910 	if (max_power > 26) {
911 		 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
912 		     "%s: External buffer size for monitor is too big %d, "
913 		     "check the FW TLV\n", DEVNAME(sc), max_power);
914 		return 0;
915 	}
916 
917 	if (sc->fw_mon.size)
918 		return 0;
919 
920 	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
921 }
922 #endif
923 
/*
 * Program the firmware debug destination registers described by the
 * IWX_UCODE_TLV_FW_DBG_DEST TLV and, in EXTERNAL_MODE, point the
 * device at the firmware monitor buffer.  The implementation is
 * currently compiled out; the active branch is a no-op returning 0.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				goto fail; /* must not return with NIC lock held */
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				goto fail; /* must not return with NIC lock held */
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/*
		 * base_reg and end_reg were already converted from little
		 * endian above; converting base_reg a second time here was
		 * a bug (harmless only on little-endian hosts).
		 */
		iwx_write_prph(sc, base_reg,
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;

fail:
	iwx_nic_unlock(sc);
	return err;
#else
	return 0;
#endif
}
1010 
1011 static void
1012 iwx_set_ltr(struct iwx_softc *sc)
1013 {
1014 	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1015 	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1016 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1017 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1018 	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1019 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1020 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1021 	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1022 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1023 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1024 	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1025 
1026 	/*
1027 	 * To workaround hardware latency issues during the boot process,
1028 	 * initialize the LTR to ~250 usec (see ltr_val above).
1029 	 * The firmware initializes this again later (to a smaller value).
1030 	 */
1031 	if (!sc->sc_integrated) {
1032 		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1033 	} else if (sc->sc_integrated &&
1034 		   sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1035 		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1036 		    IWX_HPM_MAC_LRT_ENABLE_ALL);
1037 		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1038 	}
1039 }
1040 
/*
 * Build the boot-time "context info" descriptor for pre-gen3 devices:
 * fill in RX/TX queue DMA addresses, load the ucode sections into DMA
 * memory, hand the descriptor's physical address to the device and
 * kick the firmware self-load.  Returns 0 or an errno; on failure the
 * firmware section DMA memory is released via
 * iwx_ctxt_info_free_fw_img().
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* The encoded RB CB size must be < 0xF to fit its field below. */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		/*
		 * NOTE(review): iwx_apply_debug_destination() is currently
		 * a stub returning 0, so this #if 1 block is a no-op.
		 */
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1121 
/*
 * Build the boot descriptors for gen3 devices: validate and DMA-map
 * the image loader (IML) blob, fill in the PRPH scratch area and the
 * gen3 context info structure, load the ucode sections into DMA
 * memory, program the descriptor/IML addresses into the device and
 * kick the firmware self-load.  Returns 0 or an errno; on failure the
 * firmware section DMA memory (and the IML DMA mapping, where already
 * allocated) is released.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 boot requires an image loader blob from the firmware file. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	/* Fill the PRPH scratch area the firmware reads at boot. */
	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill the gen3 context info with the DMA addresses of the PRPH
	 * info/scratch areas and the RX/TX ring descriptor arrays.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	/* Copy the image loader blob into its DMA buffer. */
	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* Program the context info and IML addresses as split 32-bit writes. */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
		    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1226 
1227 static void
1228 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1229 {
1230 	struct iwx_self_init_dram *dram = &sc->init_dram;
1231 	int i;
1232 
1233 	if (!dram->fw)
1234 		return;
1235 
1236 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1237 		iwx_dma_contig_free(&dram->fw[i]);
1238 
1239 	free(dram->fw, M_DEVBUF);
1240 	dram->lmac_cnt = 0;
1241 	dram->umac_cnt = 0;
1242 	dram->fw = NULL;
1243 }
1244 
1245 static int
1246 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1247     const uint8_t *data, size_t dlen)
1248 {
1249 	struct iwx_fw_sects *fws;
1250 	struct iwx_fw_onesect *fwone;
1251 
1252 	if (type >= IWX_UCODE_TYPE_MAX)
1253 		return EINVAL;
1254 	if (dlen < sizeof(uint32_t))
1255 		return EINVAL;
1256 
1257 	fws = &sc->sc_fw.fw_sects[type];
1258 	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1259 	    "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1260 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1261 		return EINVAL;
1262 
1263 	fwone = &fws->fw_sect[fws->fw_count];
1264 
1265 	/* first 32bit are device load offset */
1266 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1267 
1268 	/* rest is data */
1269 	fwone->fws_data = data + sizeof(uint32_t);
1270 	fwone->fws_len = dlen - sizeof(uint32_t);
1271 
1272 	fws->fw_count++;
1273 	fws->fw_totlen += fwone->fws_len;
1274 
1275 	return 0;
1276 }
1277 
1278 #define IWX_DEFAULT_SCAN_CHANNELS	40
1279 /* Newer firmware might support more channels. Raise this value if needed. */
1280 #define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */
1281 
/*
 * Wire layout of one IWX_UCODE_TLV_DEF_CALIB entry: which ucode image
 * it applies to, plus its default calibration triggers.
 */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1286 
1287 static int
1288 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1289 {
1290 	const struct iwx_tlv_calib_data *def_calib = data;
1291 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
1292 
1293 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
1294 		return EINVAL;
1295 
1296 	sc->sc_default_calib[ucode_type].flow_trigger =
1297 	    def_calib->calib.flow_trigger;
1298 	sc->sc_default_calib[ucode_type].event_trigger =
1299 	    def_calib->calib.event_trigger;
1300 
1301 	return 0;
1302 }
1303 
1304 static void
1305 iwx_fw_info_free(struct iwx_fw_info *fw)
1306 {
1307 	free(fw->fw_rawdata, M_DEVBUF);
1308 	fw->fw_rawdata = NULL;
1309 	fw->fw_rawsize = 0;
1310 	/* don't touch fw->fw_status */
1311 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1312 	free(fw->iml, M_DEVBUF);
1313 	fw->iml = NULL;
1314 	fw->iml_len = 0;
1315 }
1316 
1317 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1318 
1319 static int
1320 iwx_read_firmware(struct iwx_softc *sc)
1321 {
1322 	struct iwx_fw_info *fw = &sc->sc_fw;
1323 	const struct iwx_tlv_ucode_header *uhdr;
1324 	struct iwx_ucode_tlv tlv;
1325 	uint32_t tlv_type;
1326 	const uint8_t *data;
1327 	int err = 0;
1328 	size_t len;
1329 	const struct firmware *fwp;
1330 
1331 	if (fw->fw_status == IWX_FW_STATUS_DONE)
1332 		return 0;
1333 
1334 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1335 	fwp = firmware_get(sc->sc_fwname);
1336 	sc->sc_fwp = fwp;
1337 
1338 	if (fwp == NULL) {
1339 		printf("%s: could not read firmware %s\n",
1340 		    DEVNAME(sc), sc->sc_fwname);
1341 		err = ENOENT;
1342 		goto out;
1343 	}
1344 
1345 	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
1346 		__func__, __LINE__, DEVNAME(sc), sc->sc_fwname);
1347 
1348 
1349 	sc->sc_capaflags = 0;
1350 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1351 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1352 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1353 	sc->n_cmd_versions = 0;
1354 
1355 	uhdr = (const void *)(fwp->data);
1356 	if (*(const uint32_t *)fwp->data != 0
1357 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1358 		printf("%s: invalid firmware %s\n",
1359 		    DEVNAME(sc), sc->sc_fwname);
1360 		err = EINVAL;
1361 		goto out;
1362 	}
1363 
1364 	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1365 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1366 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1367 	    IWX_UCODE_API(le32toh(uhdr->ver)));
1368 
1369 	data = uhdr->data;
1370 	len = fwp->datasize - sizeof(*uhdr);
1371 
1372 	while (len >= sizeof(tlv)) {
1373 		size_t tlv_len;
1374 		const void *tlv_data;
1375 
1376 		memcpy(&tlv, data, sizeof(tlv));
1377 		tlv_len = le32toh(tlv.length);
1378 		tlv_type = le32toh(tlv.type);
1379 
1380 		len -= sizeof(tlv);
1381 		data += sizeof(tlv);
1382 		tlv_data = data;
1383 
1384 		if (len < tlv_len) {
1385 			printf("%s: firmware too short: %zu bytes\n",
1386 			    DEVNAME(sc), len);
1387 			err = EINVAL;
1388 			goto parse_out;
1389 		}
1390 
1391 		switch (tlv_type) {
1392 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1393 			if (tlv_len < sizeof(uint32_t)) {
1394 				err = EINVAL;
1395 				goto parse_out;
1396 			}
1397 			sc->sc_capa_max_probe_len
1398 			    = le32toh(*(const uint32_t *)tlv_data);
1399 			if (sc->sc_capa_max_probe_len >
1400 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1401 				err = EINVAL;
1402 				goto parse_out;
1403 			}
1404 			break;
1405 		case IWX_UCODE_TLV_PAN:
1406 			if (tlv_len) {
1407 				err = EINVAL;
1408 				goto parse_out;
1409 			}
1410 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1411 			break;
1412 		case IWX_UCODE_TLV_FLAGS:
1413 			if (tlv_len < sizeof(uint32_t)) {
1414 				err = EINVAL;
1415 				goto parse_out;
1416 			}
1417 			/*
1418 			 * Apparently there can be many flags, but Linux driver
1419 			 * parses only the first one, and so do we.
1420 			 *
1421 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1422 			 * Intentional or a bug?  Observations from
1423 			 * current firmware file:
1424 			 *  1) TLV_PAN is parsed first
1425 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1426 			 * ==> this resets TLV_PAN to itself... hnnnk
1427 			 */
1428 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
1429 			break;
1430 		case IWX_UCODE_TLV_CSCHEME:
1431 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1432 			if (err)
1433 				goto parse_out;
1434 			break;
1435 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1436 			uint32_t num_cpu;
1437 			if (tlv_len != sizeof(uint32_t)) {
1438 				err = EINVAL;
1439 				goto parse_out;
1440 			}
1441 			num_cpu = le32toh(*(const uint32_t *)tlv_data);
1442 			if (num_cpu < 1 || num_cpu > 2) {
1443 				err = EINVAL;
1444 				goto parse_out;
1445 			}
1446 			break;
1447 		}
1448 		case IWX_UCODE_TLV_SEC_RT:
1449 			err = iwx_firmware_store_section(sc,
1450 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1451 			if (err)
1452 				goto parse_out;
1453 			break;
1454 		case IWX_UCODE_TLV_SEC_INIT:
1455 			err = iwx_firmware_store_section(sc,
1456 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1457 			if (err)
1458 				goto parse_out;
1459 			break;
1460 		case IWX_UCODE_TLV_SEC_WOWLAN:
1461 			err = iwx_firmware_store_section(sc,
1462 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1463 			if (err)
1464 				goto parse_out;
1465 			break;
1466 		case IWX_UCODE_TLV_DEF_CALIB:
1467 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1468 				err = EINVAL;
1469 				goto parse_out;
1470 			}
1471 			err = iwx_set_default_calib(sc, tlv_data);
1472 			if (err)
1473 				goto parse_out;
1474 			break;
1475 		case IWX_UCODE_TLV_PHY_SKU:
1476 			if (tlv_len != sizeof(uint32_t)) {
1477 				err = EINVAL;
1478 				goto parse_out;
1479 			}
1480 			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
1481 			break;
1482 
1483 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1484 			const struct iwx_ucode_api *api;
1485 			int idx, i;
1486 			if (tlv_len != sizeof(*api)) {
1487 				err = EINVAL;
1488 				goto parse_out;
1489 			}
1490 			api = (const struct iwx_ucode_api *)tlv_data;
1491 			idx = le32toh(api->api_index);
1492 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1493 				err = EINVAL;
1494 				goto parse_out;
1495 			}
1496 			for (i = 0; i < 32; i++) {
1497 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1498 					continue;
1499 				setbit(sc->sc_ucode_api, i + (32 * idx));
1500 			}
1501 			break;
1502 		}
1503 
1504 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1505 			const struct iwx_ucode_capa *capa;
1506 			int idx, i;
1507 			if (tlv_len != sizeof(*capa)) {
1508 				err = EINVAL;
1509 				goto parse_out;
1510 			}
1511 			capa = (const struct iwx_ucode_capa *)tlv_data;
1512 			idx = le32toh(capa->api_index);
1513 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1514 				goto parse_out;
1515 			}
1516 			for (i = 0; i < 32; i++) {
1517 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1518 					continue;
1519 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1520 			}
1521 			break;
1522 		}
1523 
1524 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1525 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1526 			/* ignore, not used by current driver */
1527 			break;
1528 
1529 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1530 			err = iwx_firmware_store_section(sc,
1531 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1532 			    tlv_len);
1533 			if (err)
1534 				goto parse_out;
1535 			break;
1536 
1537 		case IWX_UCODE_TLV_PAGING:
1538 			if (tlv_len != sizeof(uint32_t)) {
1539 				err = EINVAL;
1540 				goto parse_out;
1541 			}
1542 			break;
1543 
1544 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1545 			if (tlv_len != sizeof(uint32_t)) {
1546 				err = EINVAL;
1547 				goto parse_out;
1548 			}
1549 			sc->sc_capa_n_scan_channels =
1550 			  le32toh(*(const uint32_t *)tlv_data);
1551 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1552 				err = ERANGE;
1553 				goto parse_out;
1554 			}
1555 			break;
1556 
1557 		case IWX_UCODE_TLV_FW_VERSION:
1558 			if (tlv_len != sizeof(uint32_t) * 3) {
1559 				err = EINVAL;
1560 				goto parse_out;
1561 			}
1562 
1563 			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1564 			    le32toh(((const uint32_t *)tlv_data)[0]),
1565 			    le32toh(((const uint32_t *)tlv_data)[1]),
1566 			    le32toh(((const uint32_t *)tlv_data)[2]));
1567 			break;
1568 
1569 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1570 			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1571 
1572 			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
1573 			if (*fw->dbg_dest_ver != 0) {
1574 				err = EINVAL;
1575 				goto parse_out;
1576 			}
1577 
1578 			if (fw->dbg_dest_tlv_init)
1579 				break;
1580 			fw->dbg_dest_tlv_init = true;
1581 
1582 			dest_v1 = (const void *)tlv_data;
1583 			fw->dbg_dest_tlv_v1 = dest_v1;
1584 			fw->n_dest_reg = tlv_len -
1585 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1586 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1587 			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1588 			    "%s: found debug dest; n_dest_reg=%d\n",
1589 			    __func__, fw->n_dest_reg);
1590 			break;
1591 		}
1592 
1593 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1594 			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;
1595 
1596 			if (!fw->dbg_dest_tlv_init ||
1597 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1598 			    fw->dbg_conf_tlv[conf->id] != NULL)
1599 				break;
1600 
1601 			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1602 			    "Found debug configuration: %d\n", conf->id);
1603 			fw->dbg_conf_tlv[conf->id] = conf;
1604 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1605 			break;
1606 		}
1607 
1608 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1609 			const struct iwx_umac_debug_addrs *dbg_ptrs =
1610 				(const void *)tlv_data;
1611 
1612 			if (tlv_len != sizeof(*dbg_ptrs)) {
1613 				err = EINVAL;
1614 				goto parse_out;
1615 			}
1616 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1617 				break;
1618 			sc->sc_uc.uc_umac_error_event_table =
1619 				le32toh(dbg_ptrs->error_info_addr) &
1620 				~IWX_FW_ADDR_CACHE_CONTROL;
1621 			sc->sc_uc.error_event_table_tlv_status |=
1622 				IWX_ERROR_EVENT_TABLE_UMAC;
1623 			break;
1624 		}
1625 
1626 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1627 			const struct iwx_lmac_debug_addrs *dbg_ptrs =
1628 				(const void *)tlv_data;
1629 
1630 			if (tlv_len != sizeof(*dbg_ptrs)) {
1631 				err = EINVAL;
1632 				goto parse_out;
1633 			}
1634 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1635 				break;
1636 			sc->sc_uc.uc_lmac_error_event_table[0] =
1637 				le32toh(dbg_ptrs->error_event_table_ptr) &
1638 				~IWX_FW_ADDR_CACHE_CONTROL;
1639 			sc->sc_uc.error_event_table_tlv_status |=
1640 				IWX_ERROR_EVENT_TABLE_LMAC1;
1641 			break;
1642 		}
1643 
1644 		case IWX_UCODE_TLV_FW_MEM_SEG:
1645 			break;
1646 
1647 		case IWX_UCODE_TLV_IML:
1648 			if (sc->sc_fw.iml != NULL) {
1649 				free(fw->iml, M_DEVBUF);
1650 				fw->iml_len = 0;
1651 			}
1652 			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
1653 			    M_WAITOK | M_ZERO);
1654 			if (sc->sc_fw.iml == NULL) {
1655 				err = ENOMEM;
1656 				goto parse_out;
1657 			}
1658 			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
1659 			sc->sc_fw.iml_len = tlv_len;
1660 			break;
1661 
1662 		case IWX_UCODE_TLV_CMD_VERSIONS:
1663 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1664 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1665 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1666 			}
1667 			if (sc->n_cmd_versions != 0) {
1668 				err = EINVAL;
1669 				goto parse_out;
1670 			}
1671 			if (tlv_len > sizeof(sc->cmd_versions)) {
1672 				err = EINVAL;
1673 				goto parse_out;
1674 			}
1675 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1676 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1677 			break;
1678 
1679 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1680 			break;
1681 
1682 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1683 		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1684 		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1685 		case IWX_UCODE_TLV_FW_NUM_BEACONS:
1686 			break;
1687 
1688 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1689 		case 58:
1690 		case 0x1000003:
1691 		case 0x1000004:
1692 			break;
1693 
1694 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1695 		case 0x1000000:
1696 		case 0x1000002:
1697 			break;
1698 
1699 		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1700 		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1701 		case IWX_UCODE_TLV_TYPE_HCMD:
1702 		case IWX_UCODE_TLV_TYPE_REGIONS:
1703 		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1704 		case IWX_UCODE_TLV_TYPE_CONF_SET:
1705 		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
1706 		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
1707 		case IWX_UCODE_TLV_CURRENT_PC:
1708 			break;
1709 
1710 		/* undocumented TLV found in iwx-cc-a0-67 image */
1711 		case 0x100000b:
1712 			break;
1713 
1714 		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1715 		case 0x101:
1716 			break;
1717 
1718 		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1719 		case 0x100000c:
1720 			break;
1721 
1722 		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
1723 		case 69:
1724 			break;
1725 
1726 		default:
1727 			err = EINVAL;
1728 			goto parse_out;
1729 		}
1730 
1731 		/*
1732 		 * Check for size_t overflow and ignore missing padding at
1733 		 * end of firmware file.
1734 		 */
1735 		if (roundup(tlv_len, 4) > len)
1736 			break;
1737 
1738 		len -= roundup(tlv_len, 4);
1739 		data += roundup(tlv_len, 4);
1740 	}
1741 
1742 	KASSERT(err == 0, ("unhandled fw parse error"));
1743 
1744 parse_out:
1745 	if (err) {
1746 		printf("%s: firmware parse error %d, "
1747 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1748 	}
1749 
1750 out:
1751 	if (err) {
1752 		fw->fw_status = IWX_FW_STATUS_NONE;
1753 		if (fw->fw_rawdata != NULL)
1754 			iwx_fw_info_free(fw);
1755 	} else
1756 		fw->fw_status = IWX_FW_STATUS_DONE;
1757 	return err;
1758 }
1759 
1760 static uint32_t
1761 iwx_prph_addr_mask(struct iwx_softc *sc)
1762 {
1763 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1764 		return 0x00ffffff;
1765 	else
1766 		return 0x000fffff;
1767 }
1768 
1769 static uint32_t
1770 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1771 {
1772 	uint32_t mask = iwx_prph_addr_mask(sc);
1773 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1774 	IWX_BARRIER_READ_WRITE(sc);
1775 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1776 }
1777 
/*
 * Read a periphery register. The caller must already hold the NIC lock
 * (see iwx_nic_lock()); this panics otherwise.
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}
1784 
1785 static void
1786 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1787 {
1788 	uint32_t mask = iwx_prph_addr_mask(sc);
1789 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1790 	IWX_BARRIER_WRITE(sc);
1791 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1792 }
1793 
/*
 * Write a periphery register. The caller must already hold the NIC lock
 * (see iwx_nic_lock()); this panics otherwise.
 */
static void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}
1800 
/* Read a UMAC periphery register; sc_umac_prph_offset is device-specific. */
static uint32_t
iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
{
	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
}
1806 
/* Write a UMAC periphery register; sc_umac_prph_offset is device-specific. */
static void
iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
}
1812 
1813 static int
1814 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1815 {
1816 	int offs, err = 0;
1817 	uint32_t *vals = buf;
1818 
1819 	if (iwx_nic_lock(sc)) {
1820 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1821 		for (offs = 0; offs < dwords; offs++)
1822 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1823 		iwx_nic_unlock(sc);
1824 	} else {
1825 		err = EBUSY;
1826 	}
1827 	return err;
1828 }
1829 
/*
 * Spin until (reg & mask) matches (bits & mask), polling every 10us for
 * up to 'timo' microseconds. Returns 1 on match, 0 on timeout.
 */
static int
iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	while ((IWX_READ(sc, reg) & mask) != (bits & mask)) {
		if (timo < 10)
			return 0;
		timo -= 10;
		DELAY(10);
	}
	return 1;
}
1845 
1846 static int
1847 iwx_nic_lock(struct iwx_softc *sc)
1848 {
1849 	if (sc->sc_nic_locks > 0) {
1850 		iwx_nic_assert_locked(sc);
1851 		sc->sc_nic_locks++;
1852 		return 1; /* already locked */
1853 	}
1854 
1855 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1856 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1857 
1858 	DELAY(2);
1859 
1860 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1861 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1862 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1863 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1864 		sc->sc_nic_locks++;
1865 		return 1;
1866 	}
1867 
1868 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1869 	return 0;
1870 }
1871 
/* Panic unless at least one reference on the NIC lock is held. */
static void
iwx_nic_assert_locked(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}
1878 
1879 static void
1880 iwx_nic_unlock(struct iwx_softc *sc)
1881 {
1882 	if (sc->sc_nic_locks > 0) {
1883 		if (--sc->sc_nic_locks == 0)
1884 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1885 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1886 	} else
1887 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1888 }
1889 
/*
 * Read-modify-write a periphery register under the NIC lock: keep only
 * the bits selected by 'mask', then OR in 'bits'. Returns 0, or EBUSY
 * if the NIC lock could not be taken.
 */
static int
iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	if (!iwx_nic_lock(sc))
		return EBUSY;

	val = (iwx_read_prph(sc, reg) & mask) | bits;
	iwx_write_prph(sc, reg, val);
	iwx_nic_unlock(sc);

	return 0;
}
1905 
/* Set bits in a periphery register; returns 0 or EBUSY. */
static int
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}
1911 
/* Clear bits in a periphery register; returns 0 or EBUSY. */
static int
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1917 
1918 static void
1919 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1920 {
1921         if (error != 0)
1922                 return;
1923 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1924 	*(bus_addr_t *)arg = segs[0].ds_addr;
1925 }
1926 
1927 static int
1928 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1929     bus_size_t size, bus_size_t alignment)
1930 {
1931 	int error;
1932 
1933 	dma->tag = NULL;
1934 	dma->map = NULL;
1935 	dma->size = size;
1936 	dma->vaddr = NULL;
1937 
1938 	error = bus_dma_tag_create(tag, alignment,
1939             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1940             1, size, 0, NULL, NULL, &dma->tag);
1941         if (error != 0)
1942                 goto fail;
1943 
1944         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1945             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1946         if (error != 0)
1947                 goto fail;
1948 
1949         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1950             iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1951         if (error != 0) {
1952 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1953 		dma->vaddr = NULL;
1954 		goto fail;
1955 	}
1956 
1957 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1958 
1959 	return 0;
1960 
1961 fail:
1962 	iwx_dma_contig_free(dma);
1963 	return error;
1964 }
1965 
/*
 * Release a buffer obtained with iwx_dma_contig_alloc(). Safe to call on
 * a partially initialized or already-freed iwx_dma_info (fields are
 * NULL-checked and cleared), so it can serve as a generic cleanup path.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1981 
1982 static int
1983 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1984 {
1985 	bus_size_t size;
1986 	int i, err;
1987 
1988 	ring->cur = 0;
1989 
1990 	/* Allocate RX descriptors (256-byte aligned). */
1991 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1992 		size = sizeof(struct iwx_rx_transfer_desc);
1993 	else
1994 		size = sizeof(uint64_t);
1995 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1996 	    size * IWX_RX_MQ_RING_COUNT, 256);
1997 	if (err) {
1998 		device_printf(sc->sc_dev,
1999 		    "could not allocate RX ring DMA memory\n");
2000 		goto fail;
2001 	}
2002 	ring->desc = ring->free_desc_dma.vaddr;
2003 
2004 	/* Allocate RX status area (16-byte aligned). */
2005 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2006 		size = sizeof(uint16_t);
2007 	else
2008 		size = sizeof(*ring->stat);
2009 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2010 	if (err) {
2011 		device_printf(sc->sc_dev,
2012 		    "could not allocate RX status DMA memory\n");
2013 		goto fail;
2014 	}
2015 	ring->stat = ring->stat_dma.vaddr;
2016 
2017 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2018 		size = sizeof(struct iwx_rx_completion_desc);
2019 	else
2020 		size = sizeof(uint32_t);
2021 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2022 	    size * IWX_RX_MQ_RING_COUNT, 256);
2023 	if (err) {
2024 		device_printf(sc->sc_dev,
2025 		    "could not allocate RX ring DMA memory\n");
2026 		goto fail;
2027 	}
2028 
2029 	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2030 	    BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2031 	    0, NULL, NULL, &ring->data_dmat);
2032 
2033 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2034 		struct iwx_rx_data *data = &ring->data[i];
2035 
2036 		memset(data, 0, sizeof(*data));
2037 		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2038 		if (err) {
2039 			device_printf(sc->sc_dev,
2040 			    "could not create RX buf DMA map\n");
2041 			goto fail;
2042 		}
2043 
2044 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2045 		if (err)
2046 			goto fail;
2047 	}
2048 	return 0;
2049 
2050 fail:	iwx_free_rx_ring(sc, ring);
2051 	return err;
2052 }
2053 
2054 static void
2055 iwx_disable_rx_dma(struct iwx_softc *sc)
2056 {
2057 	int ntries;
2058 
2059 	if (iwx_nic_lock(sc)) {
2060 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2061 			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
2062 		else
2063 			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
2064 		for (ntries = 0; ntries < 1000; ntries++) {
2065 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2066 				if (iwx_read_umac_prph(sc,
2067 				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
2068 					break;
2069 			} else {
2070 				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
2071 				    IWX_RXF_DMA_IDLE)
2072 					break;
2073 			}
2074 			DELAY(10);
2075 		}
2076 		iwx_nic_unlock(sc);
2077 	}
2078 }
2079 
2080 static void
2081 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2082 {
2083 	ring->cur = 0;
2084 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2085 	    BUS_DMASYNC_PREWRITE);
2086 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2087 		uint16_t *status = sc->rxq.stat_dma.vaddr;
2088 		*status = 0;
2089 	} else
2090 		memset(ring->stat, 0, sizeof(*ring->stat));
2091 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2092 	    BUS_DMASYNC_POSTWRITE);
2093 
2094 }
2095 
2096 static void
2097 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2098 {
2099 	int i;
2100 
2101 	iwx_dma_contig_free(&ring->free_desc_dma);
2102 	iwx_dma_contig_free(&ring->stat_dma);
2103 	iwx_dma_contig_free(&ring->used_desc_dma);
2104 
2105 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2106 		struct iwx_rx_data *data = &ring->data[i];
2107 		if (data->m != NULL) {
2108 			bus_dmamap_sync(ring->data_dmat, data->map,
2109 			    BUS_DMASYNC_POSTREAD);
2110 			bus_dmamap_unload(ring->data_dmat, data->map);
2111 			m_freem(data->m);
2112 			data->m = NULL;
2113 		}
2114 		if (data->map != NULL) {
2115 			bus_dmamap_destroy(ring->data_dmat, data->map);
2116 			data->map = NULL;
2117 		}
2118 	}
2119 	if (ring->data_dmat != NULL) {
2120 		bus_dma_tag_destroy(ring->data_dmat);
2121 		ring->data_dmat = NULL;
2122 	}
2123 }
2124 
2125 static int
2126 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2127 {
2128 	bus_addr_t paddr;
2129 	bus_size_t size;
2130 	int i, err;
2131 	size_t bc_tbl_size;
2132 	bus_size_t bc_align;
2133 	size_t mapsize;
2134 
2135 	ring->qid = qid;
2136 	ring->queued = 0;
2137 	ring->cur = 0;
2138 	ring->cur_hw = 0;
2139 	ring->tail = 0;
2140 	ring->tail_hw = 0;
2141 
2142 	/* Allocate TX descriptors (256-byte aligned). */
2143 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2144 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2145 	if (err) {
2146 		device_printf(sc->sc_dev,
2147 		    "could not allocate TX ring DMA memory\n");
2148 		goto fail;
2149 	}
2150 	ring->desc = ring->desc_dma.vaddr;
2151 
2152 	/*
2153 	 * The hardware supports up to 512 Tx rings which is more
2154 	 * than we currently need.
2155 	 *
2156 	 * In DQA mode we use 1 command queue + 1 default queue for
2157 	 * management, control, and non-QoS data frames.
2158 	 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2159 	 *
2160 	 * Tx aggregation requires additional queues, one queue per TID for
2161 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2162 	 * Firmware may assign its own internal IDs for these queues
2163 	 * depending on which TID gets aggregation enabled first.
2164 	 * The driver maintains a table mapping driver-side queue IDs
2165 	 * to firmware-side queue IDs.
2166 	 */
2167 
2168 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2169 		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2170 		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2171 		bc_align = 128;
2172 	} else {
2173 		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2174 		bc_align = 64;
2175 	}
2176 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2177 	    bc_align);
2178 	if (err) {
2179 		device_printf(sc->sc_dev,
2180 		    "could not allocate byte count table DMA memory\n");
2181 		goto fail;
2182 	}
2183 
2184 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2185 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2186 	    IWX_FIRST_TB_SIZE_ALIGN);
2187 	if (err) {
2188 		device_printf(sc->sc_dev,
2189 		    "could not allocate cmd DMA memory\n");
2190 		goto fail;
2191 	}
2192 	ring->cmd = ring->cmd_dma.vaddr;
2193 
2194 	/* FW commands may require more mapped space than packets. */
2195 	if (qid == IWX_DQA_CMD_QUEUE)
2196 		mapsize = (sizeof(struct iwx_cmd_header) +
2197 		    IWX_MAX_CMD_PAYLOAD_SIZE);
2198 	else
2199 		mapsize = MCLBYTES;
2200 	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2201 	    BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2202 	    mapsize, 0, NULL, NULL, &ring->data_dmat);
2203 
2204 	paddr = ring->cmd_dma.paddr;
2205 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2206 		struct iwx_tx_data *data = &ring->data[i];
2207 
2208 		data->cmd_paddr = paddr;
2209 		paddr += sizeof(struct iwx_device_cmd);
2210 
2211 		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2212 		if (err) {
2213 			device_printf(sc->sc_dev,
2214 			    "could not create TX buf DMA map\n");
2215 			goto fail;
2216 		}
2217 	}
2218 	KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2219 	return 0;
2220 
2221 fail:
2222 	return err;
2223 }
2224 
2225 static void
2226 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2227 {
2228 	int i;
2229 
2230 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2231 		struct iwx_tx_data *data = &ring->data[i];
2232 
2233 		if (data->m != NULL) {
2234 			bus_dmamap_sync(ring->data_dmat, data->map,
2235 			    BUS_DMASYNC_POSTWRITE);
2236 			bus_dmamap_unload(ring->data_dmat, data->map);
2237 			m_freem(data->m);
2238 			data->m = NULL;
2239 		}
2240 	}
2241 
2242 	/* Clear byte count table. */
2243 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2244 
2245 	/* Clear TX descriptors. */
2246 	memset(ring->desc, 0, ring->desc_dma.size);
2247 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2248 	    BUS_DMASYNC_PREWRITE);
2249 	sc->qfullmsk &= ~(1 << ring->qid);
2250 	sc->qenablemsk &= ~(1 << ring->qid);
2251 	for (i = 0; i < nitems(sc->aggqid); i++) {
2252 		if (sc->aggqid[i] == ring->qid) {
2253 			sc->aggqid[i] = 0;
2254 			break;
2255 		}
2256 	}
2257 	ring->queued = 0;
2258 	ring->cur = 0;
2259 	ring->cur_hw = 0;
2260 	ring->tail = 0;
2261 	ring->tail_hw = 0;
2262 	ring->tid = 0;
2263 }
2264 
2265 static void
2266 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2267 {
2268 	int i;
2269 
2270 	iwx_dma_contig_free(&ring->desc_dma);
2271 	iwx_dma_contig_free(&ring->cmd_dma);
2272 	iwx_dma_contig_free(&ring->bc_tbl);
2273 
2274 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2275 		struct iwx_tx_data *data = &ring->data[i];
2276 
2277 		if (data->m != NULL) {
2278 			bus_dmamap_sync(ring->data_dmat, data->map,
2279 			    BUS_DMASYNC_POSTWRITE);
2280 			bus_dmamap_unload(ring->data_dmat, data->map);
2281 			m_freem(data->m);
2282 			data->m = NULL;
2283 		}
2284 		if (data->map != NULL) {
2285 			bus_dmamap_destroy(ring->data_dmat, data->map);
2286 			data->map = NULL;
2287 		}
2288 	}
2289 	if (ring->data_dmat != NULL) {
2290 		bus_dma_tag_destroy(ring->data_dmat);
2291 		ring->data_dmat = NULL;
2292 	}
2293 }
2294 
/*
 * Unmask only the RF-kill interrupt cause (MSI or MSI-X mode) and allow
 * an RF-kill state change to wake the device's PCIe link from L1.
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* In MSI-X mode a cause is enabled when its mask bit is 0. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2312 
2313 static int
2314 iwx_check_rfkill(struct iwx_softc *sc)
2315 {
2316 	uint32_t v;
2317 	int rv;
2318 
2319 	/*
2320 	 * "documentation" is not really helpful here:
2321 	 *  27:	HW_RF_KILL_SW
2322 	 *	Indicates state of (platform's) hardware RF-Kill switch
2323 	 *
2324 	 * But apparently when it's off, it's on ...
2325 	 */
2326 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2327 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2328 	if (rv) {
2329 		sc->sc_flags |= IWX_FLAG_RFKILL;
2330 	} else {
2331 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2332 	}
2333 
2334 	return rv;
2335 }
2336 
/* Unmask the full default set of interrupt causes (MSI or MSI-X mode). */
static void
iwx_enable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
2356 
/*
 * Unmask only the causes needed during firmware load: the ALIVE
 * notification plus the FH RX causes that deliver it.
 */
static void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
2376 
#if 0
/* Currently unused: re-enable the causes saved in sc_intmask (MSI mode). */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2384 
/* Mask all interrupt causes; in MSI mode also ack anything pending. */
static void
iwx_disable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWX_WRITE(sc, IWX_CSR_INT, ~0);
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
	} else {
		/* Setting every mask bit disables every cause in MSI-X. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}
}
2401 
/*
 * Reset the interrupt cause table (ICT): clear it, point the device at
 * its DMA address, and switch the driver to ICT interrupt handling with
 * interrupts re-enabled.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Ack anything pending, then re-enable the default cause set. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2423 
#define IWX_HW_READY_TIMEOUT 50
/*
 * Request NIC-ready and poll (up to IWX_HW_READY_TIMEOUT microseconds)
 * for the hardware to acknowledge. On success, tell the device the OS
 * driver is alive. Returns nonzero when the hardware is ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2445 
/*
 * Bring the card to the ready state, retrying with PREPARE requests if
 * the first readiness check fails. Returns 0 when ready, ETIMEDOUT
 * otherwise.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		/*
		 * NOTE(review): 't' accumulates across outer iterations, so
		 * only the first pass waits the full 150ms budget; later
		 * passes poll once and then sleep 25ms. This matches the
		 * OpenBSD code this was ported from — confirm intentional.
		 */
		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2475 
/*
 * Toggle HPM power-gating configuration: force the core active, enable
 * power/sleep gating, then release the force. Each step is a prph
 * read-modify-write with a short settle delay; returns the first error.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2496 
2497 static void
2498 iwx_apm_config(struct iwx_softc *sc)
2499 {
2500 	uint16_t lctl, cap;
2501 	int pcie_ptr;
2502 	int error;
2503 
2504 	/*
2505 	 * L0S states have been found to be unstable with our devices
2506 	 * and in newer hardware they are not officially supported at
2507 	 * all, so we must always set the L0S_DISABLED bit.
2508 	 */
2509 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2510 
2511 	error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
2512 	if (error != 0) {
2513 		printf("can't fill pcie_ptr\n");
2514 		return;
2515 	}
2516 
2517 	lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
2518 	    sizeof(lctl));
2519 #define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
2520 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2521 #define PCI_PCIE_DCSR2 0x28
2522 	cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
2523 	    sizeof(lctl));
2524 #define PCI_PCIE_DCSR2_LTREN 0x00000400
2525 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2526 #define PCI_PCIE_LCSR_ASPM_L1 0x00000002
2527 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2528 	    DEVNAME(sc),
2529 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2530 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2531 #undef PCI_PCIE_LCSR_ASPM_L0S
2532 #undef PCI_PCIE_DCSR2
2533 #undef PCI_PCIE_DCSR2_LTREN
2534 #undef PCI_PCIE_LCSR_ASPM_L1
2535 }
2536 
2537 /*
2538  * Start up NIC's basic functionality after it has been reset
2539  * e.g. after platform boot or shutdown.
2540  * NOTE:  This does not load uCode nor start the embedded processor
2541  */
2542 static int
2543 iwx_apm_init(struct iwx_softc *sc)
2544 {
2545 	int err = 0;
2546 
2547 	/*
2548 	 * Disable L0s without affecting L1;
2549 	 *  don't wait for ICH L0s (ICH bug W/A)
2550 	 */
2551 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2552 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2553 
2554 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2555 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2556 
2557 	/*
2558 	 * Enable HAP INTA (interrupt from management bus) to
2559 	 * wake device's PCI Express link L1a -> L0s
2560 	 */
2561 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2562 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2563 
2564 	iwx_apm_config(sc);
2565 
2566 	/*
2567 	 * Set "initialization complete" bit to move adapter from
2568 	 * D0U* --> D0A* (powered-up active) state.
2569 	 */
2570 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2571 
2572 	/*
2573 	 * Wait for clock stabilization; once stabilized, access to
2574 	 * device-internal resources is supported, e.g. iwx_write_prph()
2575 	 * and accesses to uCode SRAM.
2576 	 */
2577 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2578 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2579 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2580 		printf("%s: timeout waiting for clock stabilization\n",
2581 		    DEVNAME(sc));
2582 		err = ETIMEDOUT;
2583 		goto out;
2584 	}
2585  out:
2586 	if (err)
2587 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2588 	return err;
2589 }
2590 
/*
 * Power down the adapter's basic functionality: assert PREPARE/PME,
 * stop busmaster DMA, then drop INIT_DONE to leave the powered-up state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2619 
/*
 * Configure MSI/MSI-X hardware and, in MSI-X mode, snapshot the
 * reset-default (all-causes-enabled) masks so later enable/disable
 * routines can restore them.
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* The registers hold mask bits; invert to get the enabled set. */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2633 
2634 static void
2635 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2636 {
2637 	int vector = 0;
2638 
2639 	if (!sc->sc_msix) {
2640 		/* Newer chips default to MSIX. */
2641 		if (!stopped && iwx_nic_lock(sc)) {
2642 			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2643 			    IWX_UREG_CHICK_MSI_ENABLE);
2644 			iwx_nic_unlock(sc);
2645 		}
2646 		return;
2647 	}
2648 
2649 	if (!stopped && iwx_nic_lock(sc)) {
2650 		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2651 		    IWX_UREG_CHICK_MSIX_ENABLE);
2652 		iwx_nic_unlock(sc);
2653 	}
2654 
2655 	/* Disable all interrupts */
2656 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2657 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2658 
2659 	/* Map fallback-queue (command/mgmt) to a single vector */
2660 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2661 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2662 	/* Map RSS queue (data) to the same vector */
2663 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2664 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2665 
2666 	/* Enable the RX queues cause interrupts */
2667 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2668 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2669 
2670 	/* Map non-RX causes to the same vector */
2671 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2672 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2673 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2674 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2675 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2676 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2677 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2678 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2679 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2680 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2681 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2682 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2683 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2684 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2685 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2686 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2687 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2688 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2689 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2690 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2691 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2692 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2693 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2694 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2695 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2696 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2697 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2698 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2699 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2700 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2701 
2702 	/* Enable non-RX causes interrupts */
2703 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2704 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2705 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2706 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2707 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2708 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2709 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2710 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2711 	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2712 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2713 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2714 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2715 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2716 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2717 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2718 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2719 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2720 }
2721 
2722 static int
2723 iwx_clear_persistence_bit(struct iwx_softc *sc)
2724 {
2725 	uint32_t hpm, wprot;
2726 
2727 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2728 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2729 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2730 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2731 			printf("%s: cannot clear persistence bit\n",
2732 			    DEVNAME(sc));
2733 			return EPERM;
2734 		}
2735 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2736 		    hpm & ~IWX_PERSISTENCE_BIT);
2737 	}
2738 
2739 	return 0;
2740 }
2741 
/*
 * Bring the hardware into a state where firmware can be loaded:
 * take ownership, reset, stabilize clocks, init APM and MSI-X.
 * Returns 0 on success or an errno value.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	/* Take ownership of the device from ME/other agents. */
	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	/*
	 * 22000-family devices can carry state across a SW reset via
	 * the persistence bit; clear it so the reset below is complete.
	 */
	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Integrated 22000 parts additionally require the MAC clock to be
	 * up before power gating can be forced, followed by another reset.
	 */
	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	/* Power management init must precede interrupt setup. */
	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	/* RF-kill interrupts are wanted even before firmware runs. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2794 
/*
 * Stop the device: quiesce interrupts and DMA, reset rings and the
 * on-board processor, and leave the hardware in low-power state while
 * keeping the RF-kill interrupt armed.
 */
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	/* Quiesce DMA before tearing down the rings. */
	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware-load related DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2855 
2856 static void
2857 iwx_nic_config(struct iwx_softc *sc)
2858 {
2859 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2860 	uint32_t mask, val, reg_val = 0;
2861 
2862 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2863 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2864 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2865 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2866 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2867 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2868 
2869 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2870 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2871 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2872 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2873 
2874 	/* radio configuration */
2875 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2876 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2877 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2878 
2879 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2880 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2881 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2882 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2883 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2884 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2885 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2886 
2887 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2888 	val &= ~mask;
2889 	val |= reg_val;
2890 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2891 }
2892 
/*
 * Minimal Rx-side hardware init; most Rx setup is delegated to the
 * firmware.  Always returns 0.
 */
static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	/* Set the default interrupt coalescing timer. */
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2904 
2905 static int
2906 iwx_nic_init(struct iwx_softc *sc)
2907 {
2908 	int err;
2909 
2910 	iwx_apm_init(sc);
2911 	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2912 		iwx_nic_config(sc);
2913 
2914 	err = iwx_nic_rx_init(sc);
2915 	if (err)
2916 		return err;
2917 
2918 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2919 
2920 	return 0;
2921 }
2922 
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,	/* index 0: best effort */
	IWX_GEN2_EDCA_TX_FIFO_BK,	/* index 1: background */
	IWX_GEN2_EDCA_TX_FIFO_VI,	/* index 2: video */
	IWX_GEN2_EDCA_TX_FIFO_VO,	/* index 3: voice */
};
2930 
/*
 * Enable a firmware Tx queue for the given station/TID by sending a
 * SCD_QUEUE_CONFIG command (version 0 or 3, depending on firmware).
 * The firmware's chosen queue ID must match the driver's qid since we
 * do not support dynamic queue ID assignment.
 * Returns 0 on success or an errno value.
 */
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	/* Build the command in whichever layout this firmware expects. */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must agree with our ring state. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	/* Response buffer must be released on all paths. */
	iwx_free_resp(sc, &hcmd);
	return err;
}
3023 
/*
 * Disable a firmware Tx queue previously enabled with iwx_enable_txq().
 * Mirrors the enable path: picks the command layout by firmware version,
 * then clears the driver-side queue state on success.
 * Returns 0 on success or an errno value.
 */
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Queue is gone from firmware; reset our side of it. */
	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	/* Response buffer must be released on all paths. */
	iwx_free_resp(sc, &hcmd);
	return err;
}
3082 
3083 static void
3084 iwx_post_alive(struct iwx_softc *sc)
3085 {
3086 	int txcmd_ver;
3087 
3088 	iwx_ict_reset(sc);
3089 
3090 	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3091 	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3092 		sc->sc_rate_n_flags_version = 2;
3093 	else
3094 		sc->sc_rate_n_flags_version = 1;
3095 
3096 	txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3097 }
3098 
3099 static int
3100 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
3101     uint32_t duration_tu)
3102 {
3103 
3104 	struct iwx_session_prot_cmd cmd = {
3105 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3106 		    in->in_color)),
3107 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
3108 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3109 		.duration_tu = htole32(duration_tu),
3110 	};
3111 	uint32_t cmd_id;
3112 	int err;
3113 
3114 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3115 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3116 	if (!err)
3117 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3118 	return err;
3119 }
3120 
3121 static void
3122 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3123 {
3124 	struct iwx_session_prot_cmd cmd = {
3125 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3126 		    in->in_color)),
3127 		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3128 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3129 		.duration_tu = 0,
3130 	};
3131 	uint32_t cmd_id;
3132 
3133 	/* Do nothing if the time event has already ended. */
3134 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3135 		return;
3136 
3137 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3138 	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3139 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3140 }
3141 
3142 /*
3143  * NVM read access and content parsing.  We do not support
3144  * external NVM or writing NVM.
3145  */
3146 
3147 static uint8_t
3148 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3149 {
3150 	uint8_t tx_ant;
3151 
3152 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3153 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3154 
3155 	if (sc->sc_nvm.valid_tx_ant)
3156 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3157 
3158 	return tx_ant;
3159 }
3160 
3161 static uint8_t
3162 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3163 {
3164 	uint8_t rx_ant;
3165 
3166 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3167 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3168 
3169 	if (sc->sc_nvm.valid_rx_ant)
3170 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3171 
3172 	return rx_ant;
3173 }
3174 
3175 static void
3176 iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
3177     struct ieee80211_channel chans[])
3178 {
3179 	struct iwx_softc *sc = ic->ic_softc;
3180 	struct iwx_nvm_data *data = &sc->sc_nvm;
3181 	uint8_t bands[IEEE80211_MODE_BYTES];
3182 	const uint8_t *nvm_channels;
3183 	uint32_t ch_flags;
3184 	int ch_idx, nchan;
3185 
3186 	if (sc->sc_uhb_supported) {
3187 		nchan = nitems(iwx_nvm_channels_uhb);
3188 		nvm_channels = iwx_nvm_channels_uhb;
3189 	} else {
3190 		nchan = nitems(iwx_nvm_channels_8000);
3191 		nvm_channels = iwx_nvm_channels_8000;
3192 	}
3193 
3194 	/* 2.4Ghz; 1-13: 11b/g channels. */
3195 	if (!data->sku_cap_band_24GHz_enable)
3196 		goto band_5;
3197 
3198 	memset(bands, 0, sizeof(bands));
3199 	setbit(bands, IEEE80211_MODE_11B);
3200 	setbit(bands, IEEE80211_MODE_11G);
3201 	setbit(bands, IEEE80211_MODE_11NG);
3202 	for (ch_idx = 0;
3203 	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
3204 	    ch_idx++) {
3205 
3206 		uint32_t nflags = 0;
3207 		int cflags = 0;
3208 
3209 		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
3210 			ch_flags = le32_to_cpup(
3211 			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3212 		} else {
3213 			ch_flags = le16_to_cpup(
3214 			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3215 		}
3216 		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3217 			continue;
3218 
3219 	          if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3220                   cflags |= NET80211_CBW_FLAG_HT40;
3221 
3222 		/* XXX-BZ nflags RADAR/DFS/INDOOR */
3223 
3224 		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3225 		nvm_channels[ch_idx],
3226 		ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
3227 		/* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3228 		nflags, bands, cflags);
3229 	}
3230 
3231 band_5:
3232 	/* 5Ghz */
3233 	if (!data->sku_cap_band_52GHz_enable)
3234 		goto band_6;
3235 
3236 
3237 	memset(bands, 0, sizeof(bands));
3238 	setbit(bands, IEEE80211_MODE_11A);
3239 	setbit(bands, IEEE80211_MODE_11NA);
3240 	setbit(bands, IEEE80211_MODE_VHT_5GHZ);
3241 
3242 	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
3243 	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
3244 	    ch_idx++) {
3245 		uint32_t nflags = 0;
3246 		int cflags = 0;
3247 
3248 		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
3249 			ch_flags = le32_to_cpup(
3250 			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3251 		else
3252 			ch_flags = le16_to_cpup(
3253 			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3254 
3255 		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3256 		continue;
3257 
3258 		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3259 			cflags |= NET80211_CBW_FLAG_HT40;
3260 		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
3261 			cflags |= NET80211_CBW_FLAG_VHT80;
3262 		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
3263 			cflags |= NET80211_CBW_FLAG_VHT160;
3264 
3265 		/* XXX-BZ nflags RADAR/DFS/INDOOR */
3266 
3267 		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3268 		nvm_channels[ch_idx],
3269 		ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
3270 		/* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3271 		nflags, bands, cflags);
3272 	}
3273 band_6:
3274 	/* 6GHz one day ... */
3275 	return;
3276 }
3277 
3278 static int
3279 iwx_mimo_enabled(struct iwx_softc *sc)
3280 {
3281 
3282 	return !sc->sc_nvm.sku_cap_mimo_disable;
3283 }
3284 
3285 static void
3286 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3287     uint16_t ssn, uint16_t buf_size)
3288 {
3289 	reorder_buf->head_sn = ssn;
3290 	reorder_buf->num_stored = 0;
3291 	reorder_buf->buf_size = buf_size;
3292 	reorder_buf->last_amsdu = 0;
3293 	reorder_buf->last_sub_index = 0;
3294 	reorder_buf->removed = 0;
3295 	reorder_buf->valid = 0;
3296 	reorder_buf->consec_oldsn_drops = 0;
3297 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3298 	reorder_buf->consec_oldsn_prev_drop = 0;
3299 }
3300 
3301 static void
3302 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3303 {
3304 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3305 
3306 	reorder_buf->removed = 1;
3307 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3308 }
3309 
3310 #define IWX_MAX_RX_BA_SESSIONS 16
3311 
3312 static struct iwx_rxba_data *
3313 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3314 {
3315 	int i;
3316 
3317 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3318 		if (sc->sc_rxba_data[i].baid ==
3319 		    IWX_RX_REORDER_DATA_INVALID_BAID)
3320 			continue;
3321 		if (sc->sc_rxba_data[i].tid == tid)
3322 			return &sc->sc_rxba_data[i];
3323 	}
3324 
3325 	return NULL;
3326 }
3327 
/*
 * Add or remove an Rx BA session via the RX_BAID_ALLOCATION_CONFIG
 * firmware command.  On successful start, the firmware-assigned BAID
 * is returned through *baid; on stop, *baid is set to the session's
 * existing BAID.  Returns 0 or an errno value.
 * NOTE(review): 'ni' and 'timeout_val' are currently unused here.
 */
static int
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		/* Allocate a new BA session for our station/TID. */
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		/* Removal needs the BAID of the existing session. */
		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* Command v1 removes by BAID; later versions by sta/tid. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	/* The command status carries the firmware-assigned BAID. */
	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* Reject BAIDs beyond our bookkeeping array. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3378 
/*
 * Start or stop an Rx BA (block ack) session for the given TID and
 * update the driver's per-BAID reorder bookkeeping accordingly.
 * Deaggregation itself is done in hardware.
 */
static void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	/* Enforce the firmware limit on concurrent Rx BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		/* Only BAID-ML capable firmware is supported by this port. */
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		/* A slot already in use means firmware state is stale. */
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		if (timeout_val != 0) {
			/*
			 * NOTE(review): returning here skips the session
			 * counter increment below for nonzero timeouts —
			 * confirm this is intended.
			 */
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	/* Track the number of active sessions against the limit. */
	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3429 
/*
 * Start Tx aggregation for a TID by enabling a dedicated Tx queue.
 * Firmware handles the actual BA session setup and teardown.
 */
static void
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid)
{
	int err, qid;

	qid = sc->aggqid[tid];
	if (qid == 0) {
		/* Firmware should pick the next unused Tx queue. */
		qid = fls(sc->qenablemsk);
	}

	DPRINTF(("%s: qid=%i\n", __func__, qid));

	/*
	 * Simply enable the queue.
	 * Firmware handles Tx Ba session setup and teardown.
	 */
	if ((sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwx_nic_lock(sc)) {
			return;
		}
		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
		    IWX_TX_RING_COUNT);
		iwx_nic_unlock(sc);
		if (err) {
			printf("%s: could not enable Tx queue %d "
			    "(error %d)\n", DEVNAME(sc), qid, err);
			return;
		}
	}
	/*
	 * NOTE(review): direct assignment clobbers any other txa_flags
	 * bits instead of OR-ing IEEE80211_AGGR_RUNNING in — confirm
	 * against net80211's ampdu-tx state handling.
	 */
	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
	sc->aggqid[tid] = qid;
}
3465 
/*
 * Deferred task: process pending Rx BA session start/stop requests,
 * one bit per TID in ba_rx.start_tidmask / ba_rx.stop_tidmask.
 */
static void
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			/*
			 * NOTE(review): this 'break' abandons the remaining
			 * TIDs when one is already done — 'continue' looks
			 * more likely intended; confirm before changing.
			 */
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3502 
3503 static void
3504 iwx_ba_tx_task(void *arg, int npending __unused)
3505 {
3506 	struct iwx_softc *sc = arg;
3507 	struct ieee80211com *ic = &sc->sc_ic;
3508 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3509 	struct ieee80211_node *ni = vap->iv_bss;
3510 	int tid;
3511 
3512 	IWX_LOCK(sc);
3513 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3514 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3515 			break;
3516 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3517 			DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
3518 			    tid));
3519 			iwx_sta_tx_agg_start(sc, ni, tid);
3520 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3521 			sc->sc_flags |= IWX_FLAG_AMPDUTX;
3522 		}
3523 	}
3524 
3525 	IWX_UNLOCK(sc);
3526 }
3527 
/*
 * Read the MAC address from CSR registers into data->hw_addr,
 * preferring an OEM-fused strap address over the OTP one.
 * Leaves hw_addr zeroed if the NIC cannot be locked.
 */
static void
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	uint32_t mac_addr0, mac_addr1;

	memset(data->hw_addr, 0, sizeof(data->hw_addr));

	if (!iwx_nic_lock(sc))
		return;

	/*
	 * NOTE(review): the htole32() around a CSR read mirrors the
	 * OpenBSD origin of this code; on little-endian hosts it is a
	 * no-op before the byte-flip below.
	 */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	/* If OEM fused a valid address, use it instead of the one in OTP. */
	if (iwx_is_valid_mac_addr(data->hw_addr)) {
		iwx_nic_unlock(sc);
		return;
	}

	/* Fall back to the OTP address. */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	iwx_nic_unlock(sc);
}
3556 
3557 static int
3558 iwx_is_valid_mac_addr(const uint8_t *addr)
3559 {
3560 	static const uint8_t reserved_mac[] = {
3561 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3562 	};
3563 
3564 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3565 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3566 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3567 	    !ETHER_IS_MULTICAST(addr));
3568 }
3569 
/*
 * Assemble a 6-byte MAC address from two 32-bit register words:
 * the first four octets are the bytes of mac_addr0 in reverse memory
 * order, the last two are the low two bytes of mac_addr1, swapped.
 */
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	const uint8_t *b0 = (const uint8_t *)&mac_addr0;
	const uint8_t *b1 = (const uint8_t *)&mac_addr1;

	dest[0] = b0[3];
	dest[1] = b0[2];
	dest[2] = b0[1];
	dest[3] = b0[0];
	dest[4] = b1[1];
	dest[5] = b1[0];
}
3585 
/*
 * Fetch NVM contents from firmware via NVM_GET_INFO and populate
 * sc->sc_nvm (MAC address, SKU capabilities, antenna masks, LAR) and
 * the raw response copy in sc->sc_rsp_info for later channel parsing.
 * Returns 0 on success or an errno value.
 */
static int
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err = 0;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory.  So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err) {
		printf("%s: failed to send cmd (error %d)", __func__, err);
		return err;
	}

	/* The response must be exactly the size of the expected struct. */
	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
		printf("%s: resp_len=%zu\n", __func__, resp_len);
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* MAC address comes from CSR registers, not from this response. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* LAR (location aware regulatory) needs both NVM and fw support. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* Keep a raw copy for iwx_init_channel_map() to parse later. */
	memcpy(&sc->sc_rsp_info, rsp, resp_len);
	if (v4) {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
	} else {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3675 
/*
 * Load the regular firmware image via the context-info mechanism and
 * sleep (up to one second) until the ALIVE notification wakes us.
 * Called with the softc lock held.  Returns 0 on success, an errno
 * value (including EWOULDBLOCK from msleep on timeout) otherwise.
 */
static int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	/* AX210+ devices use the gen3 context-info layout. */
	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The load image and IML are no longer needed once fw is up. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3712 
/*
 * Prepare interrupt state and NIC hardware, then kick off the
 * firmware load.  Returns 0 on success or an errno value.
 */
static int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Ack any stale interrupts before reconfiguring. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3740 
3741 static int
3742 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3743     size_t len)
3744 {
3745 	const struct iwx_ucode_tlv *tlv;
3746 	uint32_t sha1 = 0;
3747 	uint16_t mac_type = 0, rf_id = 0;
3748 	uint8_t *pnvm_data = NULL, *tmp;
3749 	int hw_match = 0;
3750 	uint32_t size = 0;
3751 	int err;
3752 
3753 	while (len >= sizeof(*tlv)) {
3754 		uint32_t tlv_len, tlv_type;
3755 
3756 		len -= sizeof(*tlv);
3757 		tlv = (const void *)data;
3758 
3759 		tlv_len = le32toh(tlv->length);
3760 		tlv_type = le32toh(tlv->type);
3761 
3762 		if (len < tlv_len) {
3763 			printf("%s: invalid TLV len: %zd/%u\n",
3764 			    DEVNAME(sc), len, tlv_len);
3765 			err = EINVAL;
3766 			goto out;
3767 		}
3768 
3769 		data += sizeof(*tlv);
3770 
3771 		switch (tlv_type) {
3772 		case IWX_UCODE_TLV_PNVM_VERSION:
3773 			if (tlv_len < sizeof(uint32_t))
3774 				break;
3775 
3776 			sha1 = le32_to_cpup((const uint32_t *)data);
3777 			break;
3778 		case IWX_UCODE_TLV_HW_TYPE:
3779 			if (tlv_len < 2 * sizeof(uint16_t))
3780 				break;
3781 
3782 			if (hw_match)
3783 				break;
3784 
3785 			mac_type = le16_to_cpup((const uint16_t *)data);
3786 			rf_id = le16_to_cpup((const uint16_t *)(data +
3787 			    sizeof(uint16_t)));
3788 
3789 			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3790 			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3791 				hw_match = 1;
3792 			break;
3793 		case IWX_UCODE_TLV_SEC_RT: {
3794 			const struct iwx_pnvm_section *section;
3795 			uint32_t data_len;
3796 
3797 			section = (const void *)data;
3798 			data_len = tlv_len - sizeof(*section);
3799 
3800 			/* TODO: remove, this is a deprecated separator */
3801 			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3802 				break;
3803 
3804 			tmp = malloc(size + data_len, M_DEVBUF,
3805 			    M_WAITOK | M_ZERO);
3806 			if (tmp == NULL) {
3807 				err = ENOMEM;
3808 				goto out;
3809 			}
3810 			// XXX:misha pnvm_data is NULL and size is 0 at first pass
3811 			memcpy(tmp, pnvm_data, size);
3812 			memcpy(tmp + size, section->data, data_len);
3813 			free(pnvm_data, M_DEVBUF);
3814 			pnvm_data = tmp;
3815 			size += data_len;
3816 			break;
3817 		}
3818 		case IWX_UCODE_TLV_PNVM_SKU:
3819 			/* New PNVM section started, stop parsing. */
3820 			goto done;
3821 		default:
3822 			break;
3823 		}
3824 
3825 		if (roundup(tlv_len, 4) > len)
3826 			break;
3827 		len -= roundup(tlv_len, 4);
3828 		data += roundup(tlv_len, 4);
3829 	}
3830 done:
3831 	if (!hw_match || size == 0) {
3832 		err = ENOENT;
3833 		goto out;
3834 	}
3835 
3836 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3837 	if (err) {
3838 		printf("%s: could not allocate DMA memory for PNVM\n",
3839 		    DEVNAME(sc));
3840 		err = ENOMEM;
3841 		goto out;
3842 	}
3843 	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3844 	iwx_ctxt_info_gen3_set_pnvm(sc);
3845 	sc->sc_pnvm_ver = sha1;
3846 out:
3847 	free(pnvm_data, M_DEVBUF);
3848 	return err;
3849 }
3850 
/*
 * Walk the TLV stream of a PNVM firmware image looking for an
 * IWX_UCODE_TLV_PNVM_SKU entry whose SKU id matches this device, then
 * hand the remainder of the stream to iwx_pnvm_handle_section().
 * Returns 0 on success, EINVAL on a malformed TLV stream, or ENOENT if
 * no matching SKU section was found.
 */
static int
iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
{
	const struct iwx_ucode_tlv *tlv;

	while (len >= sizeof(*tlv)) {
		uint32_t tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);

		/* TLV payloads are padded to 4-byte alignment. */
		if (len < tlv_len || roundup(tlv_len, 4) > len)
			return EINVAL;

		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
			const struct iwx_sku_id *sku_id =
				(const void *)(data + sizeof(*tlv));

			/* Advance past this TLV before matching the SKU. */
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);

			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
			    iwx_pnvm_handle_section(sc, data, len) == 0)
				return 0;
		} else {
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);
		}
	}

	return ENOENT;
}
3888 
3889 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3890 static void
3891 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3892 {
3893 	struct iwx_prph_scratch *prph_scratch;
3894 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3895 
3896 	prph_scratch = sc->prph_scratch_dma.vaddr;
3897 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3898 
3899 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3900 	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3901 
3902 	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3903 }
3904 
3905 /*
3906  * Load platform-NVM (non-volatile-memory) data from the filesystem.
3907  * This data apparently contains regulatory information and affects device
3908  * channel configuration.
3909  * The SKU of AX210 devices tells us which PNVM file section is needed.
3910  * Pre-AX210 devices store NVM data onboard.
3911  */
3912 static int
3913 iwx_load_pnvm(struct iwx_softc *sc)
3914 {
3915 	const int wait_flags = IWX_PNVM_COMPLETE;
3916 	int err = 0;
3917 	const struct firmware *pnvm;
3918 
3919 	if (sc->sc_sku_id[0] == 0 &&
3920 	    sc->sc_sku_id[1] == 0 &&
3921 	    sc->sc_sku_id[2] == 0)
3922 		return 0;
3923 
3924 	if (sc->sc_pnvm_name) {
3925 		if (sc->pnvm_dma.vaddr == NULL) {
3926 			IWX_UNLOCK(sc);
3927 			pnvm = firmware_get(sc->sc_pnvm_name);
3928 			if (pnvm == NULL) {
3929 				printf("%s: could not read %s (error %d)\n",
3930 				    DEVNAME(sc), sc->sc_pnvm_name, err);
3931 				IWX_LOCK(sc);
3932 				return EINVAL;
3933 			}
3934 			sc->sc_pnvm = pnvm;
3935 
3936 			err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3937 			IWX_LOCK(sc);
3938 			if (err && err != ENOENT) {
3939 				return EINVAL;
3940 			}
3941 		} else
3942 			iwx_ctxt_info_gen3_set_pnvm(sc);
3943 	}
3944 
3945 	if (!iwx_nic_lock(sc)) {
3946 		return EBUSY;
3947 	}
3948 
3949 	/*
3950 	 * If we don't have a platform NVM file simply ask firmware
3951 	 * to proceed without it.
3952 	 */
3953 
3954 	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3955 	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3956 
3957 	/* Wait for the pnvm complete notification from firmware. */
3958 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3959 		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3960 		if (err)
3961 			break;
3962 	}
3963 
3964 	iwx_nic_unlock(sc);
3965 
3966 	return err;
3967 }
3968 
3969 static int
3970 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3971 {
3972 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3973 		.valid = htole32(valid_tx_ant),
3974 	};
3975 
3976 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3977 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3978 }
3979 
3980 static int
3981 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3982 {
3983 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3984 
3985 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3986 	phy_cfg_cmd.calib_control.event_trigger =
3987 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3988 	phy_cfg_cmd.calib_control.flow_trigger =
3989 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3990 
3991 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3992 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3993 }
3994 
3995 static int
3996 iwx_send_dqa_cmd(struct iwx_softc *sc)
3997 {
3998 	struct iwx_dqa_enable_cmd dqa_cmd = {
3999 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4000 	};
4001 	uint32_t cmd_id;
4002 
4003 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4004 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4005 }
4006 
4007 static int
4008 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4009 {
4010 	int err;
4011 
4012 	IWX_UNLOCK(sc);
4013 	err = iwx_read_firmware(sc);
4014 	IWX_LOCK(sc);
4015 	if (err)
4016 		return err;
4017 
4018 	err = iwx_start_fw(sc);
4019 	if (err)
4020 		return err;
4021 
4022 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4023 		err = iwx_load_pnvm(sc);
4024 		if (err)
4025 			return err;
4026 	}
4027 
4028 	iwx_post_alive(sc);
4029 
4030 	return 0;
4031 }
4032 
/*
 * Boot the firmware through its "init" phase: load the ucode, announce
 * the start of NVM access, signal NVM access completion, and wait for
 * the firmware's init-complete notification.  When 'readnvm' is set,
 * also read the NVM contents and adopt the MAC address stored there.
 * Returns 0 on success or an errno.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* Honor the hardware rfkill switch unless we must read the NVM. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		/* Woken by the interrupt handler once the flag is set. */
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4104 
4105 static int
4106 iwx_config_ltr(struct iwx_softc *sc)
4107 {
4108 	struct iwx_ltr_config_cmd cmd = {
4109 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4110 	};
4111 
4112 	if (!sc->sc_ltr_enabled)
4113 		return 0;
4114 
4115 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4116 }
4117 
4118 static void
4119 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4120     bus_dma_segment_t *seg)
4121 {
4122 	struct iwx_rx_data *data = &ring->data[idx];
4123 
4124 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4125 		struct iwx_rx_transfer_desc *desc = ring->desc;
4126 		desc[idx].rbid = htole16(idx & 0xffff);
4127 		desc[idx].addr = htole64((*seg).ds_addr);
4128 		bus_dmamap_sync(ring->data_dmat, data->map,
4129 		    BUS_DMASYNC_PREWRITE);
4130 	} else {
4131 		((uint64_t *)ring->desc)[idx] =
4132 		    htole64((*seg).ds_addr);
4133 		bus_dmamap_sync(ring->data_dmat, data->map,
4134 		    BUS_DMASYNC_PREWRITE);
4135 	}
4136 }
4137 
/*
 * Allocate and DMA-map a fresh receive mbuf for RX ring slot 'idx' and
 * point the slot's hardware descriptor at it.
 * Returns 0 on success or an errno (ENOBUFS if no mbuf is available).
 */
static int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;
	bus_dma_segment_t seg;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	/*
	 * If the slot still holds a buffer we are replacing it in place;
	 * a subsequent mapping failure would leave the hardware pointing
	 * at an unmapped buffer, which we cannot recover from.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(ring->data_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx, &seg);
	return 0;
}
4175 
4176 static int
4177 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4178     struct iwx_rx_mpdu_desc *desc)
4179 {
4180 	int energy_a, energy_b;
4181 
4182 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4183 		energy_a = desc->v3.energy_a;
4184 		energy_b = desc->v3.energy_b;
4185 	} else {
4186 		energy_a = desc->v1.energy_a;
4187 		energy_b = desc->v1.energy_b;
4188 	}
4189 	energy_a = energy_a ? -energy_a : -256;
4190 	energy_b = energy_b ? -energy_b : -256;
4191 	return MAX(energy_a, energy_b);
4192 }
4193 
/*
 * Handle an RX PHY info notification: cache the PHY info so it can be
 * applied to frames delivered in subsequent RX notifications.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	/*
	 * NOTE(review): syncing with PREREAD immediately before copying
	 * DMA'd data out looks suspicious — POSTREAD is the usual
	 * direction for CPU reads after a device write.  Also, 'ring' is
	 * a TX ring indexed by the packet's qid while 'data' is an RX
	 * buffer; confirm the intended DMA tag/map pairing.
	 */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4206 
4207 /*
4208  * Retrieve the average noise (in dBm) among receivers.
4209  */
4210 static int
4211 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4212 {
4213 	int i, total, nbant, noise;
4214 
4215 	total = nbant = noise = 0;
4216 	for (i = 0; i < 3; i++) {
4217 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4218 		if (noise) {
4219 			total += noise;
4220 			nbant++;
4221 		}
4222 	}
4223 
4224 	/* There should be at least one antenna but check anyway. */
4225 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4226 }
4227 
#if 0
/*
 * Not yet enabled in the FreeBSD port: OpenBSD-style software CCMP
 * replay check for hardware-decrypted frames.  This code uses OpenBSD
 * net80211 APIs (ieee80211_get_rxkey, k_rsc, ieee80211_inputm) that
 * have no direct equivalents here.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is be set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0]       |
	     (uint64_t)ivp[1] <<  8 |
	     (uint64_t)ivp[4] << 16 |
	     (uint64_t)ivp[5] << 24 |
	     (uint64_t)ivp[6] << 32 |
	     (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4287 
4288 static int
4289 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4290 {
4291 	struct ieee80211_frame *wh;
4292 	int ret = 0;
4293 	uint8_t type, subtype;
4294 
4295 	wh = mtod(m, struct ieee80211_frame *);
4296 
4297 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4298 	if (type == IEEE80211_FC0_TYPE_CTL) {
4299 		return 0;
4300 	}
4301 
4302 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4303 	if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4304 		return 0;
4305 	}
4306 
4307 
4308 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4309 	    IEEE80211_FC0_TYPE_CTL)
4310 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4311 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4312 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4313 			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4314 			ret = 1;
4315 			goto out;
4316 		}
4317 		/* Check whether decryption was successful or not. */
4318 		if ((rx_pkt_status &
4319 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4320 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4321 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4322 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4323 			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4324 			ret = 1;
4325 			goto out;
4326 		}
4327 	}
4328 	out:
4329 	return ret;
4330 }
4331 
/*
 * Deliver a received frame to net80211: map the hardware-provided
 * channel number to an ic_channels[] entry, fill in the radiotap
 * header when a tap is active, and pass the mbuf up via
 * ieee80211_input_mimo(_all)().  Consumes 'm'.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Translate the IEEE channel number into an ic_channels[] index. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0	/* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* The rate_n_flags layout depends on the firmware API version. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			tap->wr_rate = (0x80 | mcs);
		} else {
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input processing may sleep; drop the driver lock. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4467 
4468 static void
4469 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4470     size_t maxlen)
4471 {
4472 	struct ieee80211com *ic = &sc->sc_ic;
4473 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4474 	struct ieee80211_node *ni = vap->iv_bss;
4475 	struct ieee80211_key *k;
4476 	struct ieee80211_rx_stats rxs;
4477 	struct iwx_rx_mpdu_desc *desc;
4478 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4479 	int rssi;
4480 	uint8_t chanidx;
4481 	uint16_t phy_info;
4482 	size_t desc_size;
4483 	int pad = 0;
4484 
4485 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4486 		desc_size = sizeof(*desc);
4487 	else
4488 		desc_size = IWX_RX_DESC_SIZE_V1;
4489 
4490 	if (maxlen < desc_size) {
4491 		m_freem(m);
4492 		return; /* drop */
4493 	}
4494 
4495 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4496 
4497 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4498 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4499 		printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4500 		m_freem(m);
4501 		return; /* drop */
4502 	}
4503 
4504 	len = le16toh(desc->mpdu_len);
4505 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4506 		/* Allow control frames in monitor mode. */
4507 		if (len < sizeof(struct ieee80211_frame_cts)) {
4508 			m_freem(m);
4509 			return;
4510 		}
4511 
4512 	} else if (len < sizeof(struct ieee80211_frame)) {
4513 		m_freem(m);
4514 		return;
4515 	}
4516 	if (len > maxlen - desc_size) {
4517 		m_freem(m);
4518 		return;
4519 	}
4520 
4521 	// TODO: arithmetic on a pointer to void is a GNU extension
4522 	m->m_data = (char *)pktdata + desc_size;
4523 	m->m_pkthdr.len = m->m_len = len;
4524 
4525 	/* Account for padding following the frame header. */
4526 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4527 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4528 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4529 		if (type == IEEE80211_FC0_TYPE_CTL) {
4530 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4531 			case IEEE80211_FC0_SUBTYPE_CTS:
4532 				hdrlen = sizeof(struct ieee80211_frame_cts);
4533 				break;
4534 			case IEEE80211_FC0_SUBTYPE_ACK:
4535 				hdrlen = sizeof(struct ieee80211_frame_ack);
4536 				break;
4537 			default:
4538 				hdrlen = sizeof(struct ieee80211_frame_min);
4539 				break;
4540 			}
4541 		} else
4542 			hdrlen = ieee80211_hdrsize(wh);
4543 
4544 		if ((le16toh(desc->status) &
4545 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4546 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4547 			// CCMP header length
4548 			hdrlen += 8;
4549 		}
4550 
4551 		memmove(m->m_data + 2, m->m_data, hdrlen);
4552 		m_adj(m, 2);
4553 
4554 	}
4555 
4556 	if ((le16toh(desc->status) &
4557 	    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4558 	    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4559 		pad = 1;
4560 	}
4561 
4562 //	/*
4563 //	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4564 //	 * in place for each subframe. But it leaves the 'A-MSDU present'
4565 //	 * bit set in the frame header. We need to clear this bit ourselves.
4566 //	 * (XXX This workaround is not required on AX200/AX201 devices that
4567 //	 * have been tested by me, but it's unclear when this problem was
4568 //	 * fixed in the hardware. It definitely affects the 9k generation.
4569 //	 * Leaving this in place for now since some 9k/AX200 hybrids seem
4570 //	 * to exist that we may eventually add support for.)
4571 //	 *
4572 //	 * And we must allow the same CCMP PN for subframes following the
4573 //	 * first subframe. Otherwise they would be discarded as replays.
4574 //	 */
4575 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4576 		DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
4577 //		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4578 //		uint8_t subframe_idx = (desc->amsdu_info &
4579 //		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4580 //		if (subframe_idx > 0)
4581 //			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4582 //		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4583 //		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4584 //			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4585 //			    struct ieee80211_qosframe_addr4 *);
4586 //			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4587 //		} else if (ieee80211_has_qos(wh) &&
4588 //		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4589 //			struct ieee80211_qosframe *qwh = mtod(m,
4590 //			    struct ieee80211_qosframe *);
4591 //			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4592 //		}
4593 	}
4594 
4595 	/*
4596 	 * Verify decryption before duplicate detection. The latter uses
4597 	 * the TID supplied in QoS frame headers and this TID is implicitly
4598 	 * verified as part of the CCMP nonce.
4599 	 */
4600 	k = ieee80211_crypto_get_txkey(ni, m);
4601 	if (k != NULL &&
4602 	    (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4603 	    iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4604 		DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4605 		m_freem(m);
4606 		return;
4607 	}
4608 
4609 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4610 		rate_n_flags = le32toh(desc->v3.rate_n_flags);
4611 		chanidx = desc->v3.channel;
4612 		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4613 	} else {
4614 		rate_n_flags = le32toh(desc->v1.rate_n_flags);
4615 		chanidx = desc->v1.channel;
4616 		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4617 	}
4618 
4619 	phy_info = le16toh(desc->phy_info);
4620 
4621 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
4622 	rssi = (0 - IWX_MIN_DBM) + rssi;		/* normalize */
4623 	rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM));	/* clip to max. 100% */
4624 
4625 	memset(&rxs, 0, sizeof(rxs));
4626 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4627 	rxs.r_flags |= IEEE80211_R_BAND;
4628 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4629 	rxs.r_flags |= IEEE80211_R_RSSI | IEEE80211_R_C_RSSI;
4630 	rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4631 
4632 	rxs.c_ieee = chanidx;
4633 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4634 	    chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4635 	rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4636 	rxs.c_rx_tsf = device_timestamp;
4637 	rxs.c_chain = chanidx;
4638 
4639 	/* rssi is in 1/2db units */
4640 	rxs.c_rssi = rssi * 2;
4641 	rxs.c_nf = sc->sc_noise;
4642 
4643 	if (pad) {
4644 		rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4645 		rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4646 	}
4647 
4648 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
4649 		printf("%s: ieee80211_add_rx_params failed\n", __func__);
4650 		return;
4651 	}
4652 
4653 	ieee80211_add_rx_params(m, &rxs);
4654 
4655 #if 0
4656 	if (iwx_rx_reorder(sc, m, chanidx, desc,
4657 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4658 	    rate_n_flags, device_timestamp, &rxi, ml))
4659 		return;
4660 #endif
4661 
4662 	if (pad) {
4663 #define TRIM 8
4664 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4665 		hdrlen = ieee80211_hdrsize(wh);
4666 		memmove(m->m_data + TRIM, m->m_data, hdrlen);
4667 		m_adj(m, TRIM);
4668 #undef TRIM
4669 	}
4670 
4671 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4672 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4673 	    rate_n_flags, device_timestamp, rssi);
4674 }
4675 
4676 static void
4677 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4678 {
4679 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
4680 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4681 	int i;
4682 
4683 	/* First TB is never cleared - it is bidirectional DMA data. */
4684 	for (i = 1; i < num_tbs; i++) {
4685 		struct iwx_tfh_tb *tb = &desc->tbs[i];
4686 		memset(tb, 0, sizeof(*tb));
4687 	}
4688 	desc->num_tbs = htole16(1);
4689 
4690 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4691 	    BUS_DMASYNC_PREWRITE);
4692 }
4693 
/*
 * Finish a transmitted frame: complete the DMA transaction, hand the
 * mbuf back to net80211 (status 0 == success), and clear the slot.
 */
static void
iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
    struct iwx_tx_data *txd)
{
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
	txd->m = NULL;
	txd->in = NULL;
}
4705 
/*
 * Reclaim all TX ring entries up to (but not including) hardware index
 * 'idx'; the firmware has finished transmitting these frames.
 */
static void
iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
{
	struct iwx_tx_data *txd;

	while (ring->tail_hw != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL) {
			iwx_clear_tx_desc(sc, ring, ring->tail);
			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
			iwx_txd_done(sc, ring, txd);
			ring->queued--;
			if (ring->queued < 0)
				panic("caught negative queue count");
		}
		/* Software and hardware indices use different ring sizes. */
		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
	}
}
4725 
/*
 * Handle a TX completion notification from the firmware: update
 * interface counters for the completed frame and reclaim all ring
 * entries up to the SSN reported by the firmware.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* A completion arrived; stop the watchdog for this queue. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	/*
	 * NOTE(review): 'm' is assumed non-NULL below — confirm the ring
	 * slot always holds an mbuf when a completion is received.
	 */
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4787 
4788 static void
4789 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4790 {
4791 	if (ring->queued < iwx_lomark) {
4792 		sc->qfullmsk &= ~(1 << ring->qid);
4793 		if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4794 			/*
4795 			 * Well, we're in interrupt context, but then again
4796 			 * I guess net80211 does all sorts of stunts in
4797 			 * interrupt context, so maybe this is no biggie.
4798 			 */
4799 			iwx_start(sc);
4800 		}
4801 	}
4802 }
4803 
4804 static void
4805 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4806 {
4807 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4808 	struct ieee80211com *ic = &sc->sc_ic;
4809 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4810 	struct iwx_node *in = IWX_NODE(vap->iv_bss);
4811 	struct ieee80211_node *ni = &in->in_ni;
4812 	struct iwx_tx_ring *ring;
4813 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4814 	int qid;
4815 
4816 //	if (ic->ic_state != IEEE80211_S_RUN)
4817 //		return;
4818 
4819 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4820 		return;
4821 
4822 	if (ba_res->sta_id != IWX_STATION_ID)
4823 		return;
4824 
4825 	in = (void *)ni;
4826 
4827 	tfd_cnt = le16toh(ba_res->tfd_cnt);
4828 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4829 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4830 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4831 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
4832 		return;
4833 
4834 	for (i = 0; i < tfd_cnt; i++) {
4835 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4836 		uint8_t tid;
4837 
4838 		tid = ba_tfd->tid;
4839 		if (tid >= nitems(sc->aggqid))
4840 			continue;
4841 
4842 		qid = sc->aggqid[tid];
4843 		if (qid != htole16(ba_tfd->q_num))
4844 			continue;
4845 
4846 		ring = &sc->txq[qid];
4847 
4848 #if 0
4849 		ba = &ni->ni_tx_ba[tid];
4850 		if (ba->ba_state != IEEE80211_BA_AGREED)
4851 			continue;
4852 #endif
4853 		idx = le16toh(ba_tfd->tfd_index);
4854 		sc->sc_tx_timer[qid] = 0;
4855 		iwx_txq_advance(sc, ring, idx);
4856 		iwx_clear_oactive(sc, ring);
4857 	}
4858 }
4859 
4860 static void
4861 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4862     struct iwx_rx_data *data)
4863 {
4864 	struct ieee80211com *ic = &sc->sc_ic;
4865 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4866 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4867 	uint32_t missed;
4868 
4869 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4870 	    (vap->iv_state != IEEE80211_S_RUN))
4871 		return;
4872 
4873 	bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4874 	    BUS_DMASYNC_POSTREAD);
4875 
4876 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4877 	if (missed > vap->iv_bmissthreshold) {
4878 		ieee80211_beacon_miss(ic);
4879 	}
4880 
4881 }
4882 
/*
 * Send a BINDING_CONTEXT_CMD to firmware to add or remove the binding
 * between the MAC context and its PHY context.
 * Returns 0 on success, EINVAL if no PHY context is set, or EIO when
 * firmware rejects the command.
 */
static int
iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
{
	struct iwx_binding_cmd cmd;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
	uint32_t status;

	/* Adding twice or removing a non-existent binding is a driver bug. */
	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("binding already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("binding already removed");

	if (phyctxt == NULL) /* XXX race with iwx_stop() */
		return EINVAL;

	/* Zero the whole command, including padding sent to firmware. */
	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only the first MAC slot is used; mark the rest invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);

	/* Without CDB support all bindings live on the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	status = 0;
	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
	    &cmd, &status);
	/* A non-zero firmware status is an error even if the send worked. */
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
4928 
4929 static uint8_t
4930 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4931 {
4932 	int ctlchan = ieee80211_chan2ieee(ic, chan);
4933 	int midpoint = chan->ic_vht_ch_freq1;
4934 
4935 	/*
4936 	 * The FW is expected to check the control channel position only
4937 	 * when in HT/VHT and the channel width is not 20MHz. Return
4938 	 * this value as the default one:
4939 	 */
4940 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4941 
4942 	switch (ctlchan - midpoint) {
4943 	case -6:
4944 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
4945 		break;
4946 	case -2:
4947 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4948 		break;
4949 	case 2:
4950 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4951 		break;
4952 	case 6:
4953 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
4954 		break;
4955 	default:
4956 		break;
4957 	}
4958 
4959 	return pos;
4960 }
4961 
/*
 * Build and send a PHY context command using the larger "UHB"
 * (ultra-high-band) command layout, versions 3 and 4.
 * Returns 0 on success or an errno from iwx_send_cmd_pdu().
 */
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	/* Zero everything, including padding bytes sent to firmware. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* Without CDB support all contexts live on the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Select channel width and control-channel position. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Older firmware expects the Rx chain configuration here; newer
	 * firmware (cmd v4, or RLC_CONFIG_CMD v2) configures it elsewhere.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5015 
#if 0
/*
 * Disabled variant of the PHY context command using the smaller
 * (non-UHB) iwx_phy_context_cmd layout. iwx_phy_ctxt_cmd() currently
 * panics instead of calling this path; kept for reference until
 * non-UHB hardware is supported.
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5077 
5078 static int
5079 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5080     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5081     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5082 {
5083 	int cmdver;
5084 
5085 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5086 	if (cmdver != 3 && cmdver != 4) {
5087 		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5088 		    DEVNAME(sc));
5089 		return ENOTSUP;
5090 	}
5091 
5092 	/*
5093 	 * Intel increased the size of the fw_channel_info struct and neglected
5094 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5095 	 * member in the middle.
5096 	 * To keep things simple we use a separate function to handle the larger
5097 	 * variant of the phy context command.
5098 	 */
5099 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5100 		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5101 		    chains_dynamic, action, sco, vht_chan_width, cmdver);
5102 	} else
5103 		panic("Unsupported old hardware contact thj@");
5104 
5105 #if 0
5106 	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5107 	    action, sco, vht_chan_width, cmdver);
5108 #endif
5109 }
5110 
5111 static int
5112 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5113 {
5114 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5115 	struct iwx_tfh_tfd *desc;
5116 	struct iwx_tx_data *txdata;
5117 	struct iwx_device_cmd *cmd;
5118 	struct mbuf *m;
5119 	bus_addr_t paddr;
5120 	uint64_t addr;
5121 	int err = 0, i, paylen, off/*, s*/;
5122 	int idx, code, async, group_id;
5123 	size_t hdrlen, datasz;
5124 	uint8_t *data;
5125 	int generation = sc->sc_generation;
5126 	bus_dma_segment_t seg[10];
5127 	int nsegs;
5128 
5129 	code = hcmd->id;
5130 	async = hcmd->flags & IWX_CMD_ASYNC;
5131 	idx = ring->cur;
5132 
5133 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5134 		paylen += hcmd->len[i];
5135 	}
5136 
5137 	/* If this command waits for a response, allocate response buffer. */
5138 	hcmd->resp_pkt = NULL;
5139 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5140 		uint8_t *resp_buf;
5141 		KASSERT(!async, ("async command want response"));
5142 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5143 		    ("wrong pkt len 1"));
5144 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5145 		    ("wrong pkt len 2"));
5146 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5147 			return ENOSPC;
5148 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5149 		    M_NOWAIT | M_ZERO);
5150 		if (resp_buf == NULL)
5151 			return ENOMEM;
5152 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5153 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5154 	} else {
5155 		sc->sc_cmd_resp_pkt[idx] = NULL;
5156 	}
5157 
5158 	desc = &ring->desc[idx];
5159 	txdata = &ring->data[idx];
5160 
5161 	/*
5162 	 * XXX Intel inside (tm)
5163 	 * Firmware API versions >= 50 reject old-style commands in
5164 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5165 	 * that such commands were in the LONG_GROUP instead in order
5166 	 * for firmware to accept them.
5167 	 */
5168 	if (iwx_cmd_groupid(code) == 0) {
5169 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5170 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5171 	} else
5172 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5173 
5174 	group_id = iwx_cmd_groupid(code);
5175 
5176 	hdrlen = sizeof(cmd->hdr_wide);
5177 	datasz = sizeof(cmd->data_wide);
5178 
5179 	if (paylen > datasz) {
5180 		/* Command is too large to fit in pre-allocated space. */
5181 		size_t totlen = hdrlen + paylen;
5182 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5183 			printf("%s: firmware command too long (%zd bytes)\n",
5184 			    DEVNAME(sc), totlen);
5185 			err = EINVAL;
5186 			goto out;
5187 		}
5188 		if (totlen > IWX_RBUF_SIZE)
5189 			panic("totlen > IWX_RBUF_SIZE");
5190 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5191 		if (m == NULL) {
5192 			printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5193 			    DEVNAME(sc), IWX_RBUF_SIZE);
5194 			err = ENOMEM;
5195 			goto out;
5196 		}
5197 		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5198 		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5199 		    seg, &nsegs, BUS_DMA_NOWAIT);
5200 		if (nsegs > 20)
5201 			panic("nsegs > 20");
5202 		DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5203 		if (err) {
5204 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5205 			    DEVNAME(sc), totlen);
5206 			m_freem(m);
5207 			goto out;
5208 		}
5209 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5210 		cmd = mtod(m, struct iwx_device_cmd *);
5211 		paddr = seg[0].ds_addr;
5212 	} else {
5213 		cmd = &ring->cmd[idx];
5214 		paddr = txdata->cmd_paddr;
5215 	}
5216 
5217 	memset(cmd, 0, sizeof(*cmd));
5218 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5219 	cmd->hdr_wide.group_id = group_id;
5220 	cmd->hdr_wide.qid = ring->qid;
5221 	cmd->hdr_wide.idx = idx;
5222 	cmd->hdr_wide.length = htole16(paylen);
5223 	cmd->hdr_wide.version = iwx_cmd_version(code);
5224 	data = cmd->data_wide;
5225 
5226 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5227 		if (hcmd->len[i] == 0)
5228 			continue;
5229 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5230 		off += hcmd->len[i];
5231 	}
5232 	KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5233 
5234 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5235 	addr = htole64(paddr);
5236 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5237 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5238 		DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5239 		    paylen));
5240 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5241 		    IWX_FIRST_TB_SIZE);
5242 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5243 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5244 		desc->num_tbs = htole16(2);
5245 	} else
5246 		desc->num_tbs = htole16(1);
5247 
5248 	if (paylen > datasz) {
5249 		bus_dmamap_sync(ring->data_dmat, txdata->map,
5250 		    BUS_DMASYNC_PREWRITE);
5251 	} else {
5252 		bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5253 		    BUS_DMASYNC_PREWRITE);
5254 	}
5255 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5256 	    BUS_DMASYNC_PREWRITE);
5257 
5258 	/* Kick command ring. */
5259 	ring->queued++;
5260 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5261 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5262 	DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5263 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5264 
5265 	if (!async) {
5266 		err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5267 		if (err == 0) {
5268 			/* if hardware is no longer up, return error */
5269 			if (generation != sc->sc_generation) {
5270 				err = ENXIO;
5271 				goto out;
5272 			}
5273 
5274 			/* Response buffer will be freed in iwx_free_resp(). */
5275 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5276 			sc->sc_cmd_resp_pkt[idx] = NULL;
5277 		} else if (generation == sc->sc_generation) {
5278 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5279 			sc->sc_cmd_resp_pkt[idx] = NULL;
5280 		}
5281 	}
5282 out:
5283 	return err;
5284 }
5285 
5286 static int
5287 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5288     uint16_t len, const void *data)
5289 {
5290 	struct iwx_host_cmd cmd = {
5291 		.id = id,
5292 		.len = { len, },
5293 		.data = { data, },
5294 		.flags = flags,
5295 	};
5296 
5297 	return iwx_send_cmd(sc, &cmd);
5298 }
5299 
/*
 * Send a command synchronously and extract the 32-bit status word from
 * the firmware's iwx_cmd_response. Returns 0 with *status set, or EIO
 * when no/short/failed response is received.
 */
static int
iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
    uint32_t *status)
{
	struct iwx_rx_packet *pkt;
	struct iwx_cmd_response *resp;
	int err, resp_len;

	/* Caller must not have requested a response; we do it here. */
	KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
	cmd->flags |= IWX_CMD_WANT_RESP;
	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);

	err = iwx_send_cmd(sc, cmd);
	if (err)
		return err;

	pkt = cmd->resp_pkt;
	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
		return EIO;

	/* Response must be exactly one iwx_cmd_response. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		iwx_free_resp(sc, cmd);
		return EIO;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
	iwx_free_resp(sc, cmd);
	return err;
}
5331 
5332 static int
5333 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5334     const void *data, uint32_t *status)
5335 {
5336 	struct iwx_host_cmd cmd = {
5337 		.id = id,
5338 		.len = { len, },
5339 		.data = { data, },
5340 	};
5341 
5342 	return iwx_send_cmd_status(sc, &cmd, status);
5343 }
5344 
/*
 * Release the response buffer allocated for an IWX_CMD_WANT_RESP
 * command by iwx_send_cmd(). Safe to call when resp_pkt is NULL.
 */
static void
iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
	    ("hcmd flags !IWX_CMD_WANT_RESP"));
	free(hcmd->resp_pkt, M_DEVBUF);
	hcmd->resp_pkt = NULL;
}
5353 
5354 static void
5355 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5356 {
5357 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5358 	struct iwx_tx_data *data;
5359 
5360 	if (qid != IWX_DQA_CMD_QUEUE) {
5361 		return;	/* Not a command ack. */
5362 	}
5363 
5364 	data = &ring->data[idx];
5365 
5366 	if (data->m != NULL) {
5367 		bus_dmamap_sync(ring->data_dmat, data->map,
5368 		    BUS_DMASYNC_POSTWRITE);
5369 		bus_dmamap_unload(ring->data_dmat, data->map);
5370 		m_freem(data->m);
5371 		data->m = NULL;
5372 	}
5373 	wakeup(&ring->desc[idx]);
5374 
5375 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5376 	if (ring->queued == 0) {
5377 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5378 			DEVNAME(sc), code));
5379 	} else if (ring->queued > 0)
5380 		ring->queued--;
5381 }
5382 
5383 static uint32_t
5384 iwx_fw_rateidx_ofdm(uint8_t rval)
5385 {
5386 	/* Firmware expects indices which match our 11a rate set. */
5387 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5388 	int i;
5389 
5390 	for (i = 0; i < rs->rs_nrates; i++) {
5391 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5392 			return i;
5393 	}
5394 
5395 	return 0;
5396 }
5397 
5398 static uint32_t
5399 iwx_fw_rateidx_cck(uint8_t rval)
5400 {
5401 	/* Firmware expects indices which match our 11b rate set. */
5402 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5403 	int i;
5404 
5405 	for (i = 0; i < rs->rs_nrates; i++) {
5406 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5407 			return i;
5408 	}
5409 
5410 	return 0;
5411 }
5412 
5413 static int
5414 iwx_min_basic_rate(struct ieee80211com *ic)
5415 {
5416 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5417 	struct ieee80211_node *ni = vap->iv_bss;
5418 	struct ieee80211_rateset *rs = &ni->ni_rates;
5419 	struct ieee80211_channel *c = ni->ni_chan;
5420 	int i, min, rval;
5421 
5422 	min = -1;
5423 
5424 	if (c == IEEE80211_CHAN_ANYC) {
5425 		printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5426 		return -1;
5427 	}
5428 
5429 	for (i = 0; i < rs->rs_nrates; i++) {
5430 		if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5431 			continue;
5432 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5433 		if (min == -1)
5434 			min = rval;
5435 		else if (rval < min)
5436 			min = rval;
5437 	}
5438 
5439 	/* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5440 	if (min == -1)
5441 		min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5442 
5443 	return min;
5444 }
5445 
5446 /*
5447  * Determine the Tx command flags and Tx rate+flags to use.
5448  * Return the selected Tx rate.
5449  */
5450 static const struct iwx_rate *
5451 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5452     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5453     struct mbuf *m)
5454 {
5455 	struct ieee80211com *ic = &sc->sc_ic;
5456 	struct ieee80211_node *ni = &in->in_ni;
5457 	struct ieee80211_rateset *rs = &ni->ni_rates;
5458 	const struct iwx_rate *rinfo = NULL;
5459 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5460 	int ridx = iwx_min_basic_rate(ic);
5461 	int min_ridx, rate_flags;
5462 	uint8_t rval;
5463 
5464 	/* We're in the process of clearing the node, no channel already */
5465 	if (ridx == -1)
5466 		return NULL;
5467 
5468 	min_ridx = iwx_rval2ridx(ridx);
5469 
5470 	*flags = 0;
5471 
5472 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5473 	    type != IEEE80211_FC0_TYPE_DATA) {
5474 		/* for non-data, use the lowest supported rate */
5475 		ridx = min_ridx;
5476 		*flags |= IWX_TX_FLAGS_CMD_RATE;
5477 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5478 		ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5479 		    & ~IEEE80211_RATE_MCS];
5480 	} else {
5481 		rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5482 		    & IEEE80211_RATE_VAL);
5483 		ridx = iwx_rval2ridx(rval);
5484 		if (ridx < min_ridx)
5485 			ridx = min_ridx;
5486 	}
5487 
5488 	if (m->m_flags & M_EAPOL)
5489 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
5490 
5491 	rinfo = &iwx_rates[ridx];
5492 
5493 	/*
5494 	 * Do not fill rate_n_flags if firmware controls the Tx rate.
5495 	 * For data frames we rely on Tx rate scaling in firmware by default.
5496 	 */
5497 	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5498 		*rate_n_flags = 0;
5499 		return rinfo;
5500 	}
5501 
5502 	/*
5503 	 * Forcing a CCK/OFDM legacy rate is important for management frames.
5504 	 * Association will only succeed if we do this correctly.
5505 	 */
5506 
5507 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5508 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5509 	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5510 	if (IWX_RIDX_IS_CCK(ridx)) {
5511 		if (sc->sc_rate_n_flags_version >= 2)
5512 			rate_flags |= IWX_RATE_MCS_CCK_MSK;
5513 		else
5514 			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5515 	} else if (sc->sc_rate_n_flags_version >= 2)
5516 		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5517 
5518 	rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5519 	    & IEEE80211_RATE_VAL);
5520 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5521 	    rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5522 
5523 	if (sc->sc_rate_n_flags_version >= 2) {
5524 		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5525 			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5526 			    IWX_RATE_LEGACY_RATE_MSK);
5527 		} else {
5528 			rate_flags |= (iwx_fw_rateidx_cck(rval) &
5529 			    IWX_RATE_LEGACY_RATE_MSK);
5530 		}
5531 	} else
5532 		rate_flags |= rinfo->plcp;
5533 
5534 	*rate_n_flags = rate_flags;
5535 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5536 	    __func__, __LINE__,*flags);
5537 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5538 	    __func__, __LINE__, *rate_n_flags);
5539 
5540 	if (sc->sc_debug & IWX_DEBUG_TXRATE)
5541 		print_ratenflags(__func__, __LINE__,
5542 		    *rate_n_flags, sc->sc_rate_n_flags_version);
5543 
5544 	return rinfo;
5545 }
5546 
/*
 * Update the byte-count table entry for a Tx ring slot. The hardware
 * scheduler uses this table to know how many bytes (or dwords, pre-
 * AX210) to fetch for the TFD at 'idx'.
 */
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Make the table update visible to the device before kicking Tx. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
5582 
5583 static int
5584 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5585 {
5586 	struct ieee80211com *ic = &sc->sc_ic;
5587 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5588 	struct iwx_node *in = (void *)ni;
5589 	struct iwx_tx_ring *ring;
5590 	struct iwx_tx_data *data;
5591 	struct iwx_tfh_tfd *desc;
5592 	struct iwx_device_cmd *cmd;
5593 	struct ieee80211_frame *wh;
5594 	struct ieee80211_key *k = NULL;
5595 	const struct iwx_rate *rinfo;
5596 	uint64_t paddr;
5597 	u_int hdrlen;
5598 	uint32_t rate_n_flags;
5599 	uint16_t num_tbs, flags, offload_assist = 0;
5600 	uint8_t type, subtype;
5601 	int i, totlen, err, pad, qid;
5602 #define IWM_MAX_SCATTER 20
5603 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5604 	int nsegs;
5605 	struct mbuf *m1;
5606 	size_t txcmd_size;
5607 
5608 	wh = mtod(m, struct ieee80211_frame *);
5609 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5610 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5611 	hdrlen = ieee80211_anyhdrsize(wh);
5612 
5613 	qid = sc->first_data_qid;
5614 
5615 	/* Put QoS frames on the data queue which maps to their TID. */
5616 	if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
5617 		uint16_t qos = ieee80211_gettid(wh);
5618 		uint8_t tid = qos & IEEE80211_QOS_TID;
5619 #if 0
5620 		/*
5621 		 * XXX-THJ: TODO when we enable ba we need to manage the
5622 		 * mappings
5623 		 */
5624 		struct ieee80211_tx_ba *ba;
5625 		ba = &ni->ni_tx_ba[tid];
5626 
5627 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5628 		    type == IEEE80211_FC0_TYPE_DATA &&
5629 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5630 		    subtype != IEEE80211_FC0_SUBTYPE_BAR &&
5631 		    sc->aggqid[tid] != 0  /*&&
5632 		    ba->ba_state == IEEE80211_BA_AGREED*/) {
5633 			qid = sc->aggqid[tid];
5634 #else
5635 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5636 		    type == IEEE80211_FC0_TYPE_DATA &&
5637 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5638 		    sc->aggqid[tid] != 0) {
5639 			qid = sc->aggqid[tid];
5640 #endif
5641 		}
5642 	}
5643 
5644 	ring = &sc->txq[qid];
5645 	desc = &ring->desc[ring->cur];
5646 	memset(desc, 0, sizeof(*desc));
5647 	data = &ring->data[ring->cur];
5648 
5649 	cmd = &ring->cmd[ring->cur];
5650 	cmd->hdr.code = IWX_TX_CMD;
5651 	cmd->hdr.flags = 0;
5652 	cmd->hdr.qid = ring->qid;
5653 	cmd->hdr.idx = ring->cur;
5654 
5655 	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5656 	if (rinfo == NULL)
5657 		return EINVAL;
5658 
5659 	if (ieee80211_radiotap_active_vap(vap)) {
5660 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5661 
5662 		tap->wt_flags = 0;
5663 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5664 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5665 		tap->wt_rate = rinfo->rate;
5666 		if (k != NULL)
5667 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5668 		ieee80211_radiotap_tx(vap, m);
5669 	}
5670 
5671 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5672 		k = ieee80211_crypto_get_txkey(ni, m);
5673 		if (k == NULL) {
5674 			printf("%s: k is NULL!\n", __func__);
5675 			m_freem(m);
5676 			return (ENOBUFS);
5677 		} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5678 			k->wk_keytsc++;
5679 		} else {
5680 			k->wk_cipher->ic_encap(k, m);
5681 
5682 			/* 802.11 headers may have moved */
5683 			wh = mtod(m, struct ieee80211_frame *);
5684 			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5685 		}
5686 	} else
5687 		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5688 
5689 	totlen = m->m_pkthdr.len;
5690 
5691 	if (hdrlen & 3) {
5692 		/* First segment length must be a multiple of 4. */
5693 		pad = 4 - (hdrlen & 3);
5694 		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5695 	} else
5696 		pad = 0;
5697 
5698 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5699 		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5700 		memset(tx, 0, sizeof(*tx));
5701 		tx->len = htole16(totlen);
5702 		tx->offload_assist = htole32(offload_assist);
5703 		tx->flags = htole16(flags);
5704 		tx->rate_n_flags = htole32(rate_n_flags);
5705 		memcpy(tx->hdr, wh, hdrlen);
5706 		txcmd_size = sizeof(*tx);
5707 	} else {
5708 		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5709 		memset(tx, 0, sizeof(*tx));
5710 		tx->len = htole16(totlen);
5711 		tx->offload_assist = htole16(offload_assist);
5712 		tx->flags = htole32(flags);
5713 		tx->rate_n_flags = htole32(rate_n_flags);
5714 		memcpy(tx->hdr, wh, hdrlen);
5715 		txcmd_size = sizeof(*tx);
5716 	}
5717 
5718 	/* Trim 802.11 header. */
5719 	m_adj(m, hdrlen);
5720 
5721 	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5722 	    &nsegs, BUS_DMA_NOWAIT);
5723 	if (err && err != EFBIG) {
5724 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5725 		m_freem(m);
5726 		return err;
5727 	}
5728 	if (err) {
5729 		/* Too many DMA segments, linearize mbuf. */
5730 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5731 		if (m1 == NULL) {
5732 			printf("%s: could not defrag mbufs\n", __func__);
5733 			m_freem(m);
5734 			return (ENOBUFS);
5735 		}
5736 		m = m1;
5737 		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5738 		    segs, &nsegs, BUS_DMA_NOWAIT);
5739 		if (err) {
5740 			printf("%s: can't map mbuf (error %d)\n", __func__,
5741 			    err);
5742 			m_freem(m);
5743 			return (err);
5744 		}
5745 	}
5746 	data->m = m;
5747 	data->in = in;
5748 
5749 	/* Fill TX descriptor. */
5750 	num_tbs = 2 + nsegs;
5751 	desc->num_tbs = htole16(num_tbs);
5752 
5753 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5754 	paddr = htole64(data->cmd_paddr);
5755 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5756 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5757 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5758 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5759 	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5760 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5761 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5762 
5763 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5764 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5765 
5766 	/* Other DMA segments are for data payload. */
5767 	for (i = 0; i < nsegs; i++) {
5768 		seg = &segs[i];
5769 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5770 		paddr = htole64(seg->ds_addr);
5771 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5772 		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5773 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5774 	}
5775 
5776 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5777 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5778 	    BUS_DMASYNC_PREWRITE);
5779 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5780 	    BUS_DMASYNC_PREWRITE);
5781 
5782 	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5783 
5784 	/* Kick TX ring. */
5785 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5786 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5787 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5788 
5789 	/* Mark TX ring as full if we reach a certain threshold. */
5790 	if (++ring->queued > iwx_himark) {
5791 		sc->qfullmsk |= 1 << ring->qid;
5792 	}
5793 
5794 	sc->sc_tx_timer[ring->qid] = 15;
5795 
5796 	return 0;
5797 }
5798 
/*
 * Ask the firmware to flush all pending frames for the given station
 * and TID bitmap, then advance the affected Tx rings past the frames
 * the firmware reports as flushed.
 *
 * Returns 0 on success, EIO if the firmware response is missing or
 * malformed, or the error from sending the command.
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	/* Synchronous command; we need the response to learn which
	 * queues were flushed and how far their read pointers moved. */
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* The response must be for the station we asked about. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/* Advance each reported ring to the post-flush read pointer. */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		/* Ignore queue ids we don't track. */
		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		/* Sanity check: the ring must serve the reported TID. */
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	/* Always release the response buffer, even on error paths. */
	iwx_free_resp(sc, &hcmd);
	return err;
}
5869 
5870 #define IWX_FLUSH_WAIT_MS	2000
5871 
5872 static int
5873 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5874 {
5875 	struct iwx_add_sta_cmd cmd;
5876 	int err;
5877 	uint32_t status;
5878 
5879 	memset(&cmd, 0, sizeof(cmd));
5880 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5881 	    in->in_color));
5882 	cmd.sta_id = IWX_STATION_ID;
5883 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5884 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5885 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5886 
5887 	status = IWX_ADD_STA_SUCCESS;
5888 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5889 	    sizeof(cmd), &cmd, &status);
5890 	if (err) {
5891 		printf("%s: could not update sta (error %d)\n",
5892 		    DEVNAME(sc), err);
5893 		return err;
5894 	}
5895 
5896 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5897 	case IWX_ADD_STA_SUCCESS:
5898 		break;
5899 	default:
5900 		err = EIO;
5901 		printf("%s: Couldn't %s draining for station\n",
5902 		    DEVNAME(sc), drain ? "enable" : "disable");
5903 		break;
5904 	}
5905 
5906 	return err;
5907 }
5908 
5909 static int
5910 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5911 {
5912 	int err;
5913 
5914 	IWX_ASSERT_LOCKED(sc);
5915 
5916 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5917 
5918 	err = iwx_drain_sta(sc, in, 1);
5919 	if (err)
5920 		goto done;
5921 
5922 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5923 	if (err) {
5924 		printf("%s: could not flush Tx path (error %d)\n",
5925 		    DEVNAME(sc), err);
5926 		goto done;
5927 	}
5928 
5929 	/*
5930 	 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the
5931 	 * fc drive rand has has been replaced in OpenBSD.
5932 	 */
5933 
5934 	err = iwx_drain_sta(sc, in, 0);
5935 done:
5936 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5937 	return err;
5938 }
5939 
5940 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
5941 
5942 static int
5943 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5944     struct iwx_beacon_filter_cmd *cmd)
5945 {
5946 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5947 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5948 }
5949 
5950 static int
5951 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5952 {
5953 	struct iwx_beacon_filter_cmd cmd = {
5954 		IWX_BF_CMD_CONFIG_DEFAULTS,
5955 		.bf_enable_beacon_filter = htole32(1),
5956 		.ba_enable_beacon_abort = htole32(enable),
5957 	};
5958 
5959 	if (!sc->sc_bf.bf_enabled)
5960 		return 0;
5961 
5962 	sc->sc_bf.ba_enabled = enable;
5963 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5964 }
5965 
5966 static void
5967 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5968     struct iwx_mac_power_cmd *cmd)
5969 {
5970 	struct ieee80211com *ic = &sc->sc_ic;
5971 	struct ieee80211_node *ni = &in->in_ni;
5972 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5973 	int dtim_period, dtim_msec, keep_alive;
5974 
5975 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5976 	    in->in_color));
5977 	if (vap->iv_dtim_period)
5978 		dtim_period = vap->iv_dtim_period;
5979 	else
5980 		dtim_period = 1;
5981 
5982 	/*
5983 	 * Regardless of power management state the driver must set
5984 	 * keep alive period. FW will use it for sending keep alive NDPs
5985 	 * immediately after association. Check that keep alive period
5986 	 * is at least 3 * DTIM.
5987 	 */
5988 	dtim_msec = dtim_period * ni->ni_intval;
5989 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5990 	keep_alive = roundup(keep_alive, 1000) / 1000;
5991 	cmd->keep_alive_seconds = htole16(keep_alive);
5992 
5993 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5994 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5995 }
5996 
5997 static int
5998 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5999 {
6000 	int err;
6001 	int ba_enable;
6002 	struct iwx_mac_power_cmd cmd;
6003 
6004 	memset(&cmd, 0, sizeof(cmd));
6005 
6006 	iwx_power_build_cmd(sc, in, &cmd);
6007 
6008 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6009 	    sizeof(cmd), &cmd);
6010 	if (err != 0)
6011 		return err;
6012 
6013 	ba_enable = !!(cmd.flags &
6014 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6015 	return iwx_update_beacon_abort(sc, in, ba_enable);
6016 }
6017 
6018 static int
6019 iwx_power_update_device(struct iwx_softc *sc)
6020 {
6021 	struct iwx_device_power_cmd cmd = { };
6022 	struct ieee80211com *ic = &sc->sc_ic;
6023 
6024 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6025 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6026 
6027 	return iwx_send_cmd_pdu(sc,
6028 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6029 }
#if 0
/*
 * Enable beacon filtering in firmware, carrying over the cached
 * beacon-abort setting.  Currently compiled out (#if 0); kept as a
 * reference counterpart to iwx_disable_beacon_filter().
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
6048 static int
6049 iwx_disable_beacon_filter(struct iwx_softc *sc)
6050 {
6051 	struct iwx_beacon_filter_cmd cmd;
6052 	int err;
6053 
6054 	memset(&cmd, 0, sizeof(cmd));
6055 
6056 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6057 	if (err == 0)
6058 		sc->sc_bf.bf_enabled = 0;
6059 
6060 	return err;
6061 }
6062 
/*
 * Add a station to the firmware, or modify an existing one when
 * 'update' is nonzero.  Monitor mode uses a dedicated general-purpose
 * station; otherwise the single link station slot is used.  HT/VHT
 * capabilities (MIMO, channel width, A-MPDU size and density) are
 * translated into firmware station flags.
 *
 * Returns 0 on success, EIO if the firmware rejected the command, or
 * the error from sending it.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	/* Adding (not updating) twice indicates a driver state bug. */
	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only set on initial add, not on update. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				/*
				 * HT MCS indexes above 7 require a second
				 * spatial stream, implying the peer can MIMO.
				 */
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Derive max A-MPDU size from the HT parameter IE. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		/* Clamp to the largest aggregation size firmware accepts. */
		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Map the peer's minimum MPDU start spacing to FW flags. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6183 
6184 static int
6185 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6186 {
6187 	struct ieee80211com *ic = &sc->sc_ic;
6188 	struct iwx_rm_sta_cmd rm_sta_cmd;
6189 	int err;
6190 
6191 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6192 		panic("sta already removed");
6193 
6194 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6195 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6196 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6197 	else
6198 		rm_sta_cmd.sta_id = IWX_STATION_ID;
6199 
6200 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6201 	    &rm_sta_cmd);
6202 
6203 	return err;
6204 }
6205 
/*
 * Full station teardown: flush pending Tx, remove firmware queues
 * (when the SCD_QUEUE_CONFIG API requires it), remove the station
 * from the firmware, and reset all driver-side aggregation state.
 *
 * Ordering matters: queues must be flushed and disabled before the
 * station entry itself is removed.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Disable every aggregation queue that is still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all driver-side block-ack / aggregation bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6278 
6279 static uint8_t
6280 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6281     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6282     int n_ssids, uint32_t channel_cfg_flags)
6283 {
6284 	struct ieee80211com *ic = &sc->sc_ic;
6285 	struct ieee80211_scan_state *ss = ic->ic_scan;
6286 	struct ieee80211_channel *c;
6287 	uint8_t nchan;
6288 	int j;
6289 
6290 	for (nchan = j = 0;
6291 	    j < ss->ss_last &&
6292 	    nchan < sc->sc_capa_n_scan_channels;
6293 	    j++) {
6294 		uint8_t channel_num;
6295 
6296 		c = ss->ss_chans[j];
6297 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6298 		if (isset(sc->sc_ucode_api,
6299 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6300 			chan->v2.channel_num = channel_num;
6301 			if (IEEE80211_IS_CHAN_2GHZ(c))
6302 				chan->v2.band = IWX_PHY_BAND_24;
6303 			else
6304 				chan->v2.band = IWX_PHY_BAND_5;
6305 			chan->v2.iter_count = 1;
6306 			chan->v2.iter_interval = 0;
6307 		} else {
6308 			chan->v1.channel_num = channel_num;
6309 			chan->v1.iter_count = 1;
6310 			chan->v1.iter_interval = htole16(0);
6311 		}
6312 		chan->flags |= htole32(channel_cfg_flags);
6313 		chan++;
6314 		nchan++;
6315 	}
6316 
6317 	return nchan;
6318 }
6319 
6320 static int
6321 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6322 {
6323 	struct ieee80211com *ic = &sc->sc_ic;
6324 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6325 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6326 	struct ieee80211_rateset *rs;
6327 	size_t remain = sizeof(preq->buf);
6328 	uint8_t *frm, *pos;
6329 
6330 	memset(preq, 0, sizeof(*preq));
6331 
6332 	if (remain < sizeof(*wh) + 2)
6333 		return ENOBUFS;
6334 
6335 	/*
6336 	 * Build a probe request frame.  Most of the following code is a
6337 	 * copy & paste of what is done in net80211.
6338 	 */
6339 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6340 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6341 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6342 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6343 	IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6344 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6345 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6346 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6347 
6348 	frm = (uint8_t *)(wh + 1);
6349 	*frm++ = IEEE80211_ELEMID_SSID;
6350 	*frm++ = 0;
6351 	/* hardware inserts SSID */
6352 
6353 	/* Tell the firmware where the MAC header is. */
6354 	preq->mac_header.offset = 0;
6355 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6356 	remain -= frm - (uint8_t *)wh;
6357 
6358 	/* Fill in 2GHz IEs and tell firmware where they are. */
6359 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6360 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6361 		if (remain < 4 + rs->rs_nrates)
6362 			return ENOBUFS;
6363 	} else if (remain < 2 + rs->rs_nrates)
6364 		return ENOBUFS;
6365 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6366 	pos = frm;
6367 	frm = ieee80211_add_rates(frm, rs);
6368 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6369 		frm = ieee80211_add_xrates(frm, rs);
6370 	remain -= frm - pos;
6371 
6372 	if (isset(sc->sc_enabled_capa,
6373 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6374 		if (remain < 3)
6375 			return ENOBUFS;
6376 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6377 		*frm++ = 1;
6378 		*frm++ = 0;
6379 		remain -= 3;
6380 	}
6381 	preq->band_data[0].len = htole16(frm - pos);
6382 
6383 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6384 		/* Fill in 5GHz IEs. */
6385 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6386 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6387 			if (remain < 4 + rs->rs_nrates)
6388 				return ENOBUFS;
6389 		} else if (remain < 2 + rs->rs_nrates)
6390 			return ENOBUFS;
6391 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6392 		pos = frm;
6393 		frm = ieee80211_add_rates(frm, rs);
6394 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6395 			frm = ieee80211_add_xrates(frm, rs);
6396 		preq->band_data[1].len = htole16(frm - pos);
6397 		remain -= frm - pos;
6398 		if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6399 			if (remain < 14)
6400 				return ENOBUFS;
6401 			frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6402 			remain -= frm - pos;
6403 			preq->band_data[1].len = htole16(frm - pos);
6404 		}
6405 	}
6406 
6407 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6408 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6409 	pos = frm;
6410 	if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6411 		if (remain < 28)
6412 			return ENOBUFS;
6413 		frm = ieee80211_add_htcap(frm, vap->iv_bss);
6414 		/* XXX add WME info? */
6415 		remain -= frm - pos;
6416 	}
6417 
6418 	preq->common_data.len = htole16(frm - pos);
6419 
6420 	return 0;
6421 }
6422 
6423 static int
6424 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6425 {
6426 	struct iwx_scan_config scan_cfg;
6427 	struct iwx_host_cmd hcmd = {
6428 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6429 		.len[0] = sizeof(scan_cfg),
6430 		.data[0] = &scan_cfg,
6431 		.flags = 0,
6432 	};
6433 	int cmdver;
6434 
6435 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6436 		printf("%s: firmware does not support reduced scan config\n",
6437 		    DEVNAME(sc));
6438 		return ENOTSUP;
6439 	}
6440 
6441 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6442 
6443 	/*
6444 	 * SCAN_CFG version >= 5 implies that the broadcast
6445 	 * STA ID field is deprecated.
6446 	 */
6447 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6448 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6449 		scan_cfg.bcast_sta_id = 0xff;
6450 
6451 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6452 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6453 
6454 	return iwx_send_cmd(sc, &hcmd);
6455 }
6456 
6457 static uint16_t
6458 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6459 {
6460 	struct ieee80211com *ic = &sc->sc_ic;
6461 	struct ieee80211_scan_state *ss = ic->ic_scan;
6462 	uint16_t flags = 0;
6463 
6464 	if (ss->ss_nssid == 0) {
6465 		DPRINTF(("%s: Passive scan started\n", __func__));
6466 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6467 	}
6468 
6469 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6470 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6471 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6472 
6473 	return flags;
6474 }
6475 
6476 #define IWX_SCAN_DWELL_ACTIVE		10
6477 #define IWX_SCAN_DWELL_PASSIVE		110
6478 
6479 /* adaptive dwell max budget time [TU] for full scan */
6480 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6481 /* adaptive dwell max budget time [TU] for directed scan */
6482 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6483 /* adaptive dwell default high band APs number */
6484 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6485 /* adaptive dwell default low band APs number */
6486 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6487 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6488 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6489 /* adaptive dwell number of APs override for p2p friendly GO channels */
6490 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6491 /* adaptive dwell number of APs override for social channels */
6492 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6493 
6494 static void
6495 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6496     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6497 {
6498 	uint32_t suspend_time, max_out_time;
6499 	uint8_t active_dwell, passive_dwell;
6500 
6501 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6502 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6503 
6504 	general_params->adwell_default_social_chn =
6505 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6506 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6507 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6508 
6509 	if (bgscan)
6510 		general_params->adwell_max_budget =
6511 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6512 	else
6513 		general_params->adwell_max_budget =
6514 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6515 
6516 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6517 	if (bgscan) {
6518 		max_out_time = htole32(120);
6519 		suspend_time = htole32(120);
6520 	} else {
6521 		max_out_time = htole32(0);
6522 		suspend_time = htole32(0);
6523 	}
6524 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6525 		htole32(max_out_time);
6526 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6527 		htole32(suspend_time);
6528 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6529 		htole32(max_out_time);
6530 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6531 		htole32(suspend_time);
6532 
6533 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6534 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6535 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6536 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6537 }
6538 
6539 static void
6540 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6541     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6542 {
6543 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6544 
6545 	gp->flags = htole16(gen_flags);
6546 
6547 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6548 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6549 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6550 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6551 
6552 	gp->scan_start_mac_id = 0;
6553 }
6554 
6555 static void
6556 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6557     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6558     int n_ssid)
6559 {
6560 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6561 
6562 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6563 	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6564 
6565 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6566 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6567 }
6568 
/*
 * Build and send a v14 UMAC scan request.  Background scans are sent
 * asynchronously.  Directed SSIDs from the net80211 scan state are
 * copied into the probe parameters and advertised to the channel
 * config via a bitmap.
 *
 * Returns 0 on success or the error from building the probe request
 * or sending the command.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	/* The request is large; use preallocated softc storage. */
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* Single scan iteration, no repeat interval. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/* Copy directed SSIDs and record their positions in a bitmap. */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6631 
6632 static void
6633 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6634 {
6635 	char alpha2[3];
6636 
6637 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6638 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6639 
6640 	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6641 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6642 
6643 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6644 }
6645 
6646 uint8_t
6647 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6648 {
6649 	int i;
6650 	uint8_t rval;
6651 
6652 	for (i = 0; i < rs->rs_nrates; i++) {
6653 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6654 		if (rval == iwx_rates[ridx].rate)
6655 			return rs->rs_rates[i];
6656 	}
6657 
6658 	return 0;
6659 }
6660 
6661 static int
6662 iwx_rval2ridx(int rval)
6663 {
6664 	int ridx;
6665 
6666 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6667 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6668 			continue;
6669 		if (rval == iwx_rates[ridx].rate)
6670 			break;
6671 	}
6672 
6673        return ridx;
6674 }
6675 
/*
 * Compute the CCK and OFDM basic-rate bitmaps the firmware uses to
 * pick control response (ACK/CTS) rates.
 *
 * The bitmaps start from the BSS basic rate set in in->in_ni and are
 * then extended with all mandatory rates below the lowest basic rate,
 * as required by 802.11-2007 9.6 (see the long comment below).
 *
 * Results are returned via *cck_rates and *ofdm_rates.  CCK bits are
 * only collected on 2 GHz channels (or while the channel is still
 * IEEE80211_CHAN_ANYC); on 5 GHz the cck bitmap is never used.
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* Collect basic CCK rates (2 GHz only); bit i == rate index i. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Collect basic OFDM rates; bits are relative to the first OFDM rate. */
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6761 
6762 static void
6763 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6764     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6765 {
6766 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6767 	struct ieee80211com *ic = &sc->sc_ic;
6768 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6769 	struct ieee80211_node *ni = vap->iv_bss;
6770 	int cck_ack_rates, ofdm_ack_rates;
6771 
6772 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6773 	    in->in_color));
6774 	cmd->action = htole32(action);
6775 
6776 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
6777 		return;
6778 
6779 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6780 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6781 	else if (ic->ic_opmode == IEEE80211_M_STA)
6782 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6783 	else
6784 		panic("unsupported operating mode %d", ic->ic_opmode);
6785 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
6786 
6787 	IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6788 	DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6789 	    ether_sprintf(cmd->node_addr)));
6790 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6791 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6792 		return;
6793 	}
6794 
6795 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6796 	DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6797 	    ether_sprintf(cmd->bssid_addr)));
6798 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6799 	cmd->cck_rates = htole32(cck_ack_rates);
6800 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6801 
6802 	cmd->cck_short_preamble
6803 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6804 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6805 	cmd->short_slot
6806 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6807 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
6808 
6809 	struct chanAccParams chp;
6810 	ieee80211_wme_vap_getparams(vap, &chp);
6811 
6812 	for (int i = 0; i < WME_NUM_AC; i++) {
6813 		int txf = iwx_ac_to_tx_fifo[i];
6814 		cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6815 		cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6816 		cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6817 		cmd->ac[txf].fifos_mask = (1 << txf);
6818 		cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6819 
6820 		cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6821 	}
6822 
6823 	if (ni->ni_flags & IEEE80211_NODE_QOS) {
6824 		DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6825 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6826 	}
6827 
6828 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6829 		switch (vap->iv_curhtprotmode) {
6830 		case IEEE80211_HTINFO_OPMODE_PURE:
6831 			break;
6832 		case IEEE80211_HTINFO_OPMODE_PROTOPT:
6833 		case IEEE80211_HTINFO_OPMODE_MIXED:
6834 			cmd->protection_flags |=
6835 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6836 			    IWX_MAC_PROT_FLG_FAT_PROT);
6837 			break;
6838 		case IEEE80211_HTINFO_OPMODE_HT20PR:
6839 			if (in->in_phyctxt &&
6840 			    (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6841 			    in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6842 				cmd->protection_flags |=
6843 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6844 				    IWX_MAC_PROT_FLG_FAT_PROT);
6845 			}
6846 			break;
6847 		default:
6848 			break;
6849 		}
6850 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6851 		DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6852 	}
6853 
6854 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6855 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6856 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6857 #undef IWX_EXP2
6858 }
6859 
/*
 * Fill the station-specific portion of a MAC context command.
 * All multi-byte fields are converted to little-endian for the
 * firmware.  When 'assoc' is set, the next DTIM time is predicted
 * from the last beacon TSF plus the DTIM count times the beacon
 * interval.
 */
static void
iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t dtim_off;
	uint64_t tsf;
	int dtim_period;

	/* Offset from the last beacon to the next DTIM, in TU-microseconds. */
	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
	tsf = le64toh(ni->ni_tstamp.tsf);
	dtim_period = vap->iv_dtim_period;

	sta->is_assoc = htole32(assoc);

	if (assoc) {
		/*
		 * NOTE(review): dtim_time truncates the 64-bit TSF sum to
		 * 32 bits -- presumably a device-time field; confirm against
		 * the firmware API definition.
		 */
		sta->dtim_time = htole32(tsf + dtim_off);
		sta->dtim_tsf = htole64(tsf + dtim_off);
		// XXX: unset in iwm
		sta->assoc_beacon_arrive_time = 0;
	}
	sta->bi = htole32(ni->ni_intval);
	sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	sta->data_policy = htole32(0);
	/* Listen interval of 10 beacon intervals, a fixed driver choice. */
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
}
6889 
6890 static int
6891 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6892     int assoc)
6893 {
6894 	struct ieee80211com *ic = &sc->sc_ic;
6895 	struct ieee80211_node *ni = &in->in_ni;
6896 	struct iwx_mac_ctx_cmd cmd;
6897 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6898 
6899 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6900 		panic("MAC already added");
6901 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6902 		panic("MAC already removed");
6903 
6904 	memset(&cmd, 0, sizeof(cmd));
6905 
6906 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6907 
6908 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6909 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6910 		    sizeof(cmd), &cmd);
6911 	}
6912 
6913 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6914 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6915 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6916 		    IWX_MAC_FILTER_ACCEPT_GRP |
6917 		    IWX_MAC_FILTER_IN_BEACON |
6918 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
6919 		    IWX_MAC_FILTER_IN_CRC32);
6920 	// XXX: dtim period is in vap
6921 	} else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
6922 		/*
6923 		 * Allow beacons to pass through as long as we are not
6924 		 * associated or we do not have dtim period information.
6925 		 */
6926 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6927 	}
6928 	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6929 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6930 }
6931 
6932 static int
6933 iwx_clear_statistics(struct iwx_softc *sc)
6934 {
6935 	struct iwx_statistics_cmd scmd = {
6936 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6937 	};
6938 	struct iwx_host_cmd cmd = {
6939 		.id = IWX_STATISTICS_CMD,
6940 		.len[0] = sizeof(scmd),
6941 		.data[0] = &scmd,
6942 		.flags = IWX_CMD_WANT_RESP,
6943 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6944 	};
6945 	int err;
6946 
6947 	err = iwx_send_cmd(sc, &cmd);
6948 	if (err)
6949 		return err;
6950 
6951 	iwx_free_resp(sc, &cmd);
6952 	return 0;
6953 }
6954 
/*
 * Start a regular (foreground) UMAC scan.
 * Returns 0 on success or the error from iwx_umac_scan_v14().
 */
static int
iwx_scan(struct iwx_softc *sc)
{
	int err = iwx_umac_scan_v14(sc, 0);

	if (err != 0)
		printf("%s: could not initiate scan\n", DEVNAME(sc));

	return err;
}
6967 
6968 static int
6969 iwx_bgscan(struct ieee80211com *ic)
6970 {
6971 	struct iwx_softc *sc = ic->ic_softc;
6972 	int err;
6973 
6974 	err = iwx_umac_scan_v14(sc, 1);
6975 	if (err) {
6976 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6977 		return err;
6978 	}
6979 	return 0;
6980 }
6981 
6982 static int
6983 iwx_enable_mgmt_queue(struct iwx_softc *sc)
6984 {
6985 	int err;
6986 
6987 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
6988 
6989 	/*
6990 	 * Non-QoS frames use the "MGMT" TID and queue.
6991 	 * Other TIDs and data queues are reserved for QoS data frames.
6992 	 */
6993 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
6994 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
6995 	if (err) {
6996 		printf("%s: could not enable Tx queue %d (error %d)\n",
6997 		    DEVNAME(sc), sc->first_data_qid, err);
6998 		return err;
6999 	}
7000 
7001 	return 0;
7002 }
7003 
7004 static int
7005 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7006 {
7007 	int err, cmd_ver;
7008 
7009 	/* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7010 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7011 	    IWX_SCD_QUEUE_CONFIG_CMD);
7012 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7013 		return 0;
7014 
7015 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7016 
7017 	err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7018 	    IWX_MGMT_TID);
7019 	if (err) {
7020 		printf("%s: could not disable Tx queue %d (error %d)\n",
7021 		    DEVNAME(sc), sc->first_data_qid, err);
7022 		return err;
7023 	}
7024 
7025 	return 0;
7026 }
7027 
7028 static int
7029 iwx_rs_rval2idx(uint8_t rval)
7030 {
7031 	/* Firmware expects indices which match our 11g rate set. */
7032 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7033 	int i;
7034 
7035 	for (i = 0; i < rs->rs_nrates; i++) {
7036 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7037 			return i;
7038 	}
7039 
7040 	return -1;
7041 }
7042 
7043 static uint16_t
7044 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7045 {
7046 	uint16_t htrates = 0;
7047 	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7048 	int i;
7049 
7050 	if (rsidx == IEEE80211_HT_RATESET_SISO) {
7051 		for (i = 0; i < htrs->rs_nrates; i++) {
7052 			if (htrs->rs_rates[i] <= 7)
7053 				htrates |= (1 << htrs->rs_rates[i]);
7054 		}
7055 	} else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7056 		for (i = 0; i < htrs->rs_nrates; i++) {
7057 			if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7058 				htrates |= (1 << (htrs->rs_rates[i] - 8));
7059 		}
7060 	} else
7061 		panic(("iwx_rs_ht_rates"));
7062 
7063 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7064 	    "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7065 
7066 	return htrates;
7067 }
7068 
/*
 * Build a bitmap of the VHT MCS values (0..max) usable toward this
 * peer for the given 1-based spatial stream count 'num_ss'.
 *
 * NOTE(review): this reads ni_vht_mcsinfo.tx_mcs_map, the map the
 * peer advertises for its transmit side -- confirm this is the
 * intended direction (vs. rx_mcs_map) against net80211's VHT parsing.
 *
 * If the peer supports no MCS at this stream count, max_mcs stays -1
 * and the return expression collapses to 0 (empty bitmap).
 */
uint16_t
iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
{
	uint16_t rx_mcs;
	int max_mcs = -1;
#define IEEE80211_VHT_MCS_FOR_SS_MASK(n)        (0x3 << (2*((n)-1)))
#define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)       (2*((n)-1))
	/* Extract the 2-bit support code for this stream count. */
	rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
	    IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);

	switch (rx_mcs) {
	case IEEE80211_VHT_MCS_NOT_SUPPORTED:
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_7:
		max_mcs = 7;
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_8:
		max_mcs = 8;
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_9:
		/* Disable VHT MCS 9 for 20MHz-only stations. */
		if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
			max_mcs = 8;
		else
			max_mcs = 9;
		break;
	default:
		/* Should not happen; Values above cover the possible range. */
		panic("invalid VHT Rx MCS value %u", rx_mcs);
	}

	return ((1 << (max_mcs + 1)) - 1);
}
7103 
/*
 * Configure firmware rate selection using version 3 of the
 * TLC_MNG_CONFIG command.
 *
 * Currently disabled: the v3 path is untested on this port, so we
 * panic instead of sending a possibly malformed command.  The #else
 * branch is kept as a reference implementation mirroring
 * iwx_rs_init_v4().
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the legacy rate set into firmware rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the TLC mode and MCS maps based on peer capabilities. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Maximum channel width supported by the current PHY context. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Max A-MSDU lengths per 802.11: 3895 for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7175 
/*
 * Configure firmware rate selection (TLC) for the station using
 * version 4 of the TLC_MNG_CONFIG command: legacy rate bitmap, TLC
 * mode (non-HT/HT/VHT), per-stream MCS maps, channel width, chain
 * mask, maximum A-MSDU length and short-GI support.
 *
 * Returns 0 on success (command is sent asynchronously) or EINVAL if
 * a rate in the node's rate set cannot be mapped to a firmware index.
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the legacy rate set into firmware rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	/* Debug only: dump the negotiated HT MCS list. */
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the TLC mode and MCS maps based on peer capabilities. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	/* OpenBSD derives the width from the PHY context; kept for reference. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Derive the maximum channel width from the node's channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Max A-MSDU lengths per 802.11: 3895 for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7274 
7275 static int
7276 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7277 {
7278 	int cmd_ver;
7279 
7280 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7281 	    IWX_TLC_MNG_CONFIG_CMD);
7282 	if (cmd_ver == 4)
7283 		return iwx_rs_init_v4(sc, in);
7284 	else
7285 		return iwx_rs_init_v3(sc, in);
7286 }
7287 
/*
 * Handle a TLC_MNG_UPDATE_NOTIF from firmware: record the Tx rate the
 * firmware rate-selection algorithm chose in the bss node, so that
 * net80211 reports it (ifconfig, rate statistics).
 *
 * Two rate_n_flags layouts exist; notification version >= 3 uses the
 * "version 2" layout.  HT/MCS rates are recorded with the
 * IEEE80211_RATE_MCS convention, legacy rates as an index into the
 * node's rate set.
 */
static void
iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = (void *)vap->iv_bss;

	struct ieee80211_rateset *rs = &ni->ni_rates;
	uint32_t rate_n_flags;
	uint8_t plcp, rval;
	int i, cmd_ver, rate_n_flags_ver2 = 0;

	/* Only care about our station and notifications carrying a rate. */
	if (notif->sta_id != IWX_STATION_ID ||
	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
		return;

	rate_n_flags = le32toh(notif->rate);

	if (sc->sc_debug & IWX_DEBUG_TXRATE)
		print_ratenflags(__func__, __LINE__,
		    rate_n_flags, sc->sc_rate_n_flags_version);

	/* Notification version >= 3 implies the v2 rate_n_flags layout. */
	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_TLC_MNG_UPDATE_NOTIF);
	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
		rate_n_flags_ver2 = 1;

	/* HT (MCS) rates: record and return early. */
	if (rate_n_flags_ver2) {
		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
		if (mod_type == IWX_RATE_MCS_HT_MSK) {

			ieee80211_node_set_txrate_dot11rate(ni,
				IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
				IEEE80211_RATE_MCS);
			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
			    rate_n_flags);
			return;
		}
	} else {
		if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
			ieee80211_node_set_txrate_dot11rate(ni,
			    rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));

			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS idx: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
			return;
		}
	}

	/* Legacy rates: recover the 802.11 rate value (500 kbps units). */
	if (rate_n_flags_ver2) {
		/* NB: this 'rs' intentionally shadows the node's rate set. */
		const struct ieee80211_rateset *rs;
		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
			rs = &ieee80211_std_rateset_11a;
		else
			rs = &ieee80211_std_rateset_11b;
		if (ridx < rs->rs_nrates)
			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
		else
			rval = 0;
	} else {
		/* v1 layout encodes the PLCP code; map it back to a rate. */
		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);

		rval = 0;
		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
			if (iwx_rates[i].plcp == plcp) {
				rval = iwx_rates[i].rate;
				break;
			}
		}
	}

	/* Store the rate as an index into the node's own rate set. */
	if (rval) {
		uint8_t rv;
		for (i = 0; i < rs->rs_nrates; i++) {
			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
			if (rv == rval) {
				ieee80211_node_set_txrate_dot11rate(ni, i);
				break;
			}
		}
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
		    "%s:%d new rate %d\n", __func__, __LINE__,
		    ieee80211_node_get_txrate_dot11rate(ni));
	}
}
7380 
7381 static int
7382 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7383     uint8_t chains_static, uint8_t chains_dynamic)
7384 {
7385 	struct iwx_rlc_config_cmd cmd;
7386 	uint32_t cmd_id;
7387 	uint8_t active_cnt, idle_cnt;
7388 
7389 	memset(&cmd, 0, sizeof(cmd));
7390 
7391 	idle_cnt = chains_static;
7392 	active_cnt = chains_dynamic;
7393 
7394 	cmd.phy_id = htole32(phyctxt->id);
7395 	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7396 	    IWX_PHY_RX_CHAIN_VALID_POS);
7397 	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7398 	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7399 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7400 
7401 	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7402 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7403 }
7404 
/*
 * Re-point a PHY context at a new channel/width configuration.
 *
 * When the firmware supports binding CDB and the new channel is on a
 * different band, the context cannot simply be modified: it is
 * removed and re-added.  Otherwise a single MODIFY command suffices.
 * Finally, if the firmware uses RLC_CONFIG_CMD version 2, the Rx
 * chain configuration is sent separately via iwx_phy_send_rlc().
 *
 * Returns 0 on success, EIO if no channel is set, or the error from
 * the underlying PHY context command.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		    return EIO;
	}

	/* Band change with CDB support requires a remove/add cycle. */
	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Remember the settings the firmware now has. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/* RLC_CONFIG_CMD v2 carries the Rx chain config separately. */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7469 
/*
 * Bring the firmware up to AUTH state for vap->iv_bss: configure a
 * PHY context, add the MAC context, binding and station, enable the
 * Tx queue(s), clear statistics, and schedule session protection so
 * the firmware stays on-channel during association.
 *
 * On failure, previously created firmware state is unwound in reverse
 * order -- but only if no device reset happened in the meantime
 * (tracked by comparing sc_generation).
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	/* Point PHY context 0 at the BSS (or monitored) channel. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
 	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode uses a dedicated injection queue and is done here. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7574 
/*
 * Tear down the firmware state built by iwx_auth(): cancel session
 * protection, then remove station, binding and MAC context in that
 * order (guarded by the corresponding sc_flags bits), and finally
 * park the now-unused PHY context on a default channel.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s:  IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7626 
/*
 * Transition into RUN state: apply the peer's HT/VHT channel settings to
 * our PHY context, refresh the station and MAC contexts now that the AP
 * has assigned an associd, then enable smart-FIFO, multicast filtering,
 * power management and rate scaling.  Returns 0 on success or an errno
 * from the first failing firmware command.
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Use two chains only when MIMO is enabled. */
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
			sco = IEEE80211_HTOP0_SCO_SCN;
		/* 80 MHz only for VHT peers on a VHT80 channel. */
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode has no peer to do rate scaling against. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7723 
/*
 * Leave RUN state: flush pending Tx for the station, tear down Rx block
 * ack sessions, switch smart-FIFO off, disable beacon filtering and mark
 * the station as disassociated in the MAC context.  Returns 0 on success
 * or an errno from the first failing firmware command.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		/* Final zero argument requests teardown of this BA session. */
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7779 
7780 static struct ieee80211_node *
7781 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7782 {
7783 	return malloc(sizeof (struct iwx_node), M_80211_NODE,
7784 	    M_NOWAIT | M_ZERO);
7785 }
7786 
#if 0
/*
 * Hardware CCMP key offload support carried over from OpenBSD.
 * Currently compiled out on FreeBSD (the net80211 key API differs);
 * kept for reference until the offload path is ported.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	/* Queue the key; the setkey task installs it asynchronously. */
	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	return EBUSY;
}

/*
 * Install one CCMP key into firmware via IWX_ADD_STA_KEY.  On failure
 * the association is torn down (deauth + back to SCAN).  Once both the
 * pairwise and group keys are in place the port is marked valid.
 */
int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Key offset 1 is used for the group key, 0 for the pairwise key. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}

/*
 * Deferred key installation: drain the ring of queued keys, stopping
 * early on the first error or when the driver is shutting down.
 */
void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}

/*
 * Remove a CCMP key from firmware by sending IWX_ADD_STA_KEY with the
 * IWX_STA_KEY_NOT_VALID flag set.  Sent asynchronously; failure is
 * ignored since the station is going away anyway.
 */
void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
                ieee80211_delete_key(ic, ni, k);
		return;
	}

	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
#endif
7939 
/*
 * Driver half of a net80211 state transition, invoked without the
 * net80211 lock held (see iwx_newstate).  Unwinds firmware state when
 * moving to a lower (or repeated) state, then programs the firmware for
 * the new state.  Returns 0 on success so the caller can chain to
 * net80211's default handler.
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	/*
	 * Moving down (or sideways) in the state machine: the switch
	 * cases deliberately fall through so that e.g. RUN -> INIT runs
	 * both the run-stop and the deauth steps.
	 */
	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/* Program the firmware for the state being entered. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
			break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		/* Firmware-side association work happens on entering RUN. */
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8005 
8006 static int
8007 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
8008 {
8009 	struct iwx_vap *ivp = IWX_VAP(vap);
8010 	struct ieee80211com *ic = vap->iv_ic;
8011 	enum ieee80211_state ostate = vap->iv_state;
8012 	int err;
8013 
8014 	/*
8015 	 * Prevent attempts to transition towards the same state, unless
8016 	 * we are scanning in which case a SCAN -> SCAN transition
8017 	 * triggers another scan iteration. And AUTH -> AUTH is needed
8018 	 * to support band-steering.
8019 	 */
8020 	if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
8021 	    nstate != IEEE80211_S_AUTH)
8022 		return 0;
8023 	IEEE80211_UNLOCK(ic);
8024 	err = iwx_newstate_sub(vap, nstate);
8025 	IEEE80211_LOCK(ic);
8026 	if (err == 0)
8027 		err = ivp->iv_newstate(vap, nstate, arg);
8028 
8029 	return (err);
8030 }
8031 
8032 static void
8033 iwx_endscan(struct iwx_softc *sc)
8034 {
8035         struct ieee80211com *ic = &sc->sc_ic;
8036         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8037 
8038         if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8039                 return;
8040 
8041         sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8042 
8043         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8044         wakeup(&vap->iv_state); /* wake up iwx_newstate */
8045 }
8046 
8047 /*
8048  * Aging and idle timeouts for the different possible scenarios
8049  * in default configuration
8050  */
8051 static const uint32_t
8052 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8053 	{
8054 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8055 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8056 	},
8057 	{
8058 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
8059 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8060 	},
8061 	{
8062 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
8063 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
8064 	},
8065 	{
8066 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
8067 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
8068 	},
8069 	{
8070 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
8071 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
8072 	},
8073 };
8074 
8075 /*
8076  * Aging and idle timeouts for the different possible scenarios
8077  * in single BSS MAC configuration.
8078  */
8079 static const uint32_t
8080 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8081 	{
8082 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
8083 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
8084 	},
8085 	{
8086 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
8087 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
8088 	},
8089 	{
8090 		htole32(IWX_SF_MCAST_AGING_TIMER),
8091 		htole32(IWX_SF_MCAST_IDLE_TIMER)
8092 	},
8093 	{
8094 		htole32(IWX_SF_BA_AGING_TIMER),
8095 		htole32(IWX_SF_BA_IDLE_TIMER)
8096 	},
8097 	{
8098 		htole32(IWX_SF_TX_RE_AGING_TIMER),
8099 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
8100 	},
8101 };
8102 
8103 static void
8104 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8105     struct ieee80211_node *ni)
8106 {
8107 	int i, j, watermark;
8108 
8109 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8110 
8111 	/*
8112 	 * If we are in association flow - check antenna configuration
8113 	 * capabilities of the AP station, and choose the watermark accordingly.
8114 	 */
8115 	if (ni) {
8116 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8117 			struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8118 			int hasmimo = 0;
8119 			for (i = 0; i < htrs->rs_nrates; i++) {
8120 				if (htrs->rs_rates[i] > 7) {
8121 					hasmimo = 1;
8122 					break;
8123 				}
8124 			}
8125 			if (hasmimo)
8126 				watermark = IWX_SF_W_MARK_MIMO2;
8127 			else
8128 				watermark = IWX_SF_W_MARK_SISO;
8129 		} else {
8130 			watermark = IWX_SF_W_MARK_LEGACY;
8131 		}
8132 	/* default watermark value for unassociated mode. */
8133 	} else {
8134 		watermark = IWX_SF_W_MARK_MIMO2;
8135 	}
8136 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8137 
8138 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8139 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8140 			sf_cmd->long_delay_timeouts[i][j] =
8141 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8142 		}
8143 	}
8144 
8145 	if (ni) {
8146 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8147 		       sizeof(iwx_sf_full_timeout));
8148 	} else {
8149 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8150 		       sizeof(iwx_sf_full_timeout_def));
8151 	}
8152 
8153 }
8154 
8155 static int
8156 iwx_sf_config(struct iwx_softc *sc, int new_state)
8157 {
8158 	struct ieee80211com *ic = &sc->sc_ic;
8159 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8160 	struct ieee80211_node *ni = vap->iv_bss;
8161 	struct iwx_sf_cfg_cmd sf_cmd = {
8162 		.state = htole32(new_state),
8163 	};
8164 	int err = 0;
8165 
8166 	switch (new_state) {
8167 	case IWX_SF_UNINIT:
8168 	case IWX_SF_INIT_OFF:
8169 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
8170 		break;
8171 	case IWX_SF_FULL_ON:
8172 		iwx_fill_sf_command(sc, &sf_cmd, ni);
8173 		break;
8174 	default:
8175 		return EINVAL;
8176 	}
8177 
8178 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8179 				   sizeof(sf_cmd), &sf_cmd);
8180 	return err;
8181 }
8182 
8183 static int
8184 iwx_send_bt_init_conf(struct iwx_softc *sc)
8185 {
8186 	struct iwx_bt_coex_cmd bt_cmd;
8187 
8188 	bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8189 
8190 	bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8191 	bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8192 	bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8193 
8194 
8195 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8196 	    &bt_cmd);
8197 }
8198 
8199 static int
8200 iwx_send_soc_conf(struct iwx_softc *sc)
8201 {
8202 	struct iwx_soc_configuration_cmd cmd;
8203 	int err;
8204 	uint32_t cmd_id, flags = 0;
8205 
8206 	memset(&cmd, 0, sizeof(cmd));
8207 
8208 	/*
8209 	 * In VER_1 of this command, the discrete value is considered
8210 	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
8211 	 * values in VER_1, this is backwards-compatible with VER_2,
8212 	 * as long as we don't set any other flag bits.
8213 	 */
8214 	if (!sc->sc_integrated) { /* VER_1 */
8215 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8216 	} else { /* VER_2 */
8217 		uint8_t scan_cmd_ver;
8218 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8219 			flags |= (sc->sc_ltr_delay &
8220 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8221 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8222 		    IWX_SCAN_REQ_UMAC);
8223 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8224 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8225 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8226 	}
8227 	cmd.flags = htole32(flags);
8228 
8229 	cmd.latency = htole32(sc->sc_xtal_latency);
8230 
8231 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8232 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8233 	if (err)
8234 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8235 	return err;
8236 }
8237 
/*
 * Send an MCC (mobile country code) update for regulatory domain
 * 'alpha2' (two ASCII letters, e.g. "ZZ" for the world domain) and
 * validate the firmware's response.  Returns 0 on success, EIO if the
 * response is missing or malformed, or an errno from command submission.
 */
static int
iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
{
	struct iwx_mcc_update_cmd mcc_cmd;
	struct iwx_host_cmd hcmd = {
		.id = IWX_MCC_UPDATE_CMD,
		.flags = IWX_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	struct iwx_rx_packet *pkt;
	struct iwx_mcc_update_resp *resp;
	size_t resp_len;
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code is packed as two big-endian ASCII bytes. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;

	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* The response carries a variable-length channel list; make sure
	 * the payload is large enough before touching any fields. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len < sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	if (resp_len != sizeof(*resp) +
	    resp->n_channels * sizeof(resp->channels[0])) {
		err = EIO;
		goto out;
	}

	/* NOTE(review): "source_id=0x%d" mixes a 0x prefix with a decimal
	 * conversion; probably meant 0x%x. */
	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));

out:
	/* Always release the response buffer allocated by iwx_send_cmd. */
	iwx_free_resp(sc, &hcmd);

	return err;
}
8294 
8295 static int
8296 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8297 {
8298 	struct iwx_temp_report_ths_cmd cmd;
8299 	int err;
8300 
8301 	/*
8302 	 * In order to give responsibility for critical-temperature-kill
8303 	 * and TX backoff to FW we need to send an empty temperature
8304 	 * reporting command at init time.
8305 	 */
8306 	memset(&cmd, 0, sizeof(cmd));
8307 
8308 	err = iwx_send_cmd_pdu(sc,
8309 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8310 	    0, sizeof(cmd), &cmd);
8311 	if (err)
8312 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8313 		    DEVNAME(sc), err);
8314 
8315 	return err;
8316 }
8317 
8318 static int
8319 iwx_init_hw(struct iwx_softc *sc)
8320 {
8321 	struct ieee80211com *ic = &sc->sc_ic;
8322 	int err = 0, i;
8323 
8324 	err = iwx_run_init_mvm_ucode(sc, 0);
8325 	if (err)
8326 		return err;
8327 
8328 	if (!iwx_nic_lock(sc))
8329 		return EBUSY;
8330 
8331 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8332 	if (err) {
8333 		printf("%s: could not init tx ant config (error %d)\n",
8334 		    DEVNAME(sc), err);
8335 		goto err;
8336 	}
8337 
8338 	if (sc->sc_tx_with_siso_diversity) {
8339 		err = iwx_send_phy_cfg_cmd(sc);
8340 		if (err) {
8341 			printf("%s: could not send phy config (error %d)\n",
8342 			    DEVNAME(sc), err);
8343 			goto err;
8344 		}
8345 	}
8346 
8347 	err = iwx_send_bt_init_conf(sc);
8348 	if (err) {
8349 		printf("%s: could not init bt coex (error %d)\n",
8350 		    DEVNAME(sc), err);
8351 		return err;
8352 	}
8353 
8354 	err = iwx_send_soc_conf(sc);
8355 	if (err) {
8356 		printf("%s: iwx_send_soc_conf failed\n", __func__);
8357 		return err;
8358 	}
8359 
8360 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8361 		printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8362 		err = iwx_send_dqa_cmd(sc);
8363 		if (err) {
8364 			printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8365 			    "failed (error %d)\n", __func__, err);
8366 			return err;
8367 		}
8368 	}
8369 	// TODO phyctxt
8370 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8371 		/*
8372 		 * The channel used here isn't relevant as it's
8373 		 * going to be overwritten in the other flows.
8374 		 * For now use the first channel we have.
8375 		 */
8376 		sc->sc_phyctxt[i].id = i;
8377 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8378 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8379 		    IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8380 		if (err) {
8381 			printf("%s: could not add phy context %d (error %d)\n",
8382 			    DEVNAME(sc), i, err);
8383 			goto err;
8384 		}
8385 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8386 		    IWX_RLC_CONFIG_CMD) == 2) {
8387 			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8388 			if (err) {
8389 				printf("%s: could not configure RLC for PHY "
8390 				    "%d (error %d)\n", DEVNAME(sc), i, err);
8391 				goto err;
8392 			}
8393 		}
8394 	}
8395 
8396 	err = iwx_config_ltr(sc);
8397 	if (err) {
8398 		printf("%s: PCIe LTR configuration failed (error %d)\n",
8399 		    DEVNAME(sc), err);
8400 	}
8401 
8402 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8403 		err = iwx_send_temp_report_ths_cmd(sc);
8404 		if (err) {
8405 			printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8406 			    __func__);
8407 			goto err;
8408 		}
8409 	}
8410 
8411 	err = iwx_power_update_device(sc);
8412 	if (err) {
8413 		printf("%s: could not send power command (error %d)\n",
8414 		    DEVNAME(sc), err);
8415 		goto err;
8416 	}
8417 
8418 	if (sc->sc_nvm.lar_enabled) {
8419 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
8420 		if (err) {
8421 			printf("%s: could not init LAR (error %d)\n",
8422 			    DEVNAME(sc), err);
8423 			goto err;
8424 		}
8425 	}
8426 
8427 	err = iwx_config_umac_scan_reduced(sc);
8428 	if (err) {
8429 		printf("%s: could not configure scan (error %d)\n",
8430 		    DEVNAME(sc), err);
8431 		goto err;
8432 	}
8433 
8434 	err = iwx_disable_beacon_filter(sc);
8435 	if (err) {
8436 		printf("%s: could not disable beacon filter (error %d)\n",
8437 		    DEVNAME(sc), err);
8438 		goto err;
8439 	}
8440 
8441 err:
8442 	iwx_nic_unlock(sc);
8443 	return err;
8444 }
8445 
8446 /* Allow multicast from our BSSID. */
8447 static int
8448 iwx_allow_mcast(struct iwx_softc *sc)
8449 {
8450 	struct ieee80211com *ic = &sc->sc_ic;
8451 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8452 	struct iwx_node *in = IWX_NODE(vap->iv_bss);
8453 	struct iwx_mcast_filter_cmd *cmd;
8454 	size_t size;
8455 	int err;
8456 
8457 	size = roundup(sizeof(*cmd), 4);
8458 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8459 	if (cmd == NULL)
8460 		return ENOMEM;
8461 	cmd->filter_own = 1;
8462 	cmd->port_id = 0;
8463 	cmd->count = 0;
8464 	cmd->pass_all = 1;
8465 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8466 
8467 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8468 	    0, size, cmd);
8469 	free(cmd, M_DEVBUF);
8470 	return err;
8471 }
8472 
/*
 * Bring the device up: run pre-initialization, start the hardware and
 * perform full firmware initialization.  On failure the device is
 * stopped again, unless a newer generation has taken over in the
 * meantime.  On success the watchdog is armed.
 */
static int
iwx_init(struct iwx_softc *sc)
{
	int err, generation;
	/* Bump the generation so stale work can detect it lost the race. */
	generation = ++sc->sc_generation;
	/* NOTE(review): iwx_preinit()'s result is ignored here; confirm it
	 * cannot fail in a way that should abort initialization. */
	iwx_preinit(sc);

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: iwx_start_hw failed\n", __func__);
		return err;
	}

	err = iwx_init_hw(sc);
	if (err) {
		/* Only stop the device if no newer init has started. */
		if (generation == sc->sc_generation)
			iwx_stop_device(sc);
		printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
		return err;
	}

	sc->sc_flags |= IWX_FLAG_HW_INITED;
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);

	return 0;
}
8499 
8500 static void
8501 iwx_start(struct iwx_softc *sc)
8502 {
8503         struct ieee80211_node *ni;
8504         struct mbuf *m;
8505 
8506         while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8507                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8508                 if (iwx_tx(sc, m, ni) != 0) {
8509                       if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8510                         continue;
8511                 }
8512         }
8513 }
8514 
8515 static void
8516 iwx_stop(struct iwx_softc *sc)
8517 {
8518 	struct ieee80211com *ic = &sc->sc_ic;
8519 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8520 	struct iwx_vap *ivp = IWX_VAP(vap);
8521 
8522 	iwx_stop_device(sc);
8523 
8524 	/* Reset soft state. */
8525 	sc->sc_generation++;
8526 	ivp->phy_ctxt = NULL;
8527 
8528 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8529 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8530 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8531 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8532 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8533 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8534 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8535 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8536 
8537 	sc->sc_rx_ba_sessions = 0;
8538 	sc->ba_rx.start_tidmask = 0;
8539 	sc->ba_rx.stop_tidmask = 0;
8540 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
8541 	sc->ba_tx.start_tidmask = 0;
8542 	sc->ba_tx.stop_tidmask = 0;
8543 }
8544 
8545 static void
8546 iwx_watchdog(void *arg)
8547 {
8548 	struct iwx_softc *sc = arg;
8549 	struct ieee80211com *ic = &sc->sc_ic;
8550 	int i;
8551 
8552 	/*
8553 	 * We maintain a separate timer for each Tx queue because
8554 	 * Tx aggregation queues can get "stuck" while other queues
8555 	 * keep working. The Linux driver uses a similar workaround.
8556 	 */
8557 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8558 		if (sc->sc_tx_timer[i] > 0) {
8559 			if (--sc->sc_tx_timer[i] == 0) {
8560 				printf("%s: device timeout\n", DEVNAME(sc));
8561 
8562 				iwx_nic_error(sc);
8563 				iwx_dump_driver_status(sc);
8564 				ieee80211_restart_all(ic);
8565 				return;
8566 			}
8567 		}
8568 	}
8569 	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8570 }
8571 
8572 /*
8573  * Note: This structure is read from the device with IO accesses,
8574  * and the reading already does the endian conversion. As it is
8575  * read with uint32_t-sized accesses, any members with a different size
8576  * need to be ordered correctly though!
8577  */
8578 struct iwx_error_event_table {
8579 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8580 	uint32_t error_id;		/* type of error */
8581 	uint32_t trm_hw_status0;	/* TRM HW status */
8582 	uint32_t trm_hw_status1;	/* TRM HW status */
8583 	uint32_t blink2;		/* branch link */
8584 	uint32_t ilink1;		/* interrupt link */
8585 	uint32_t ilink2;		/* interrupt link */
8586 	uint32_t data1;		/* error-specific data */
8587 	uint32_t data2;		/* error-specific data */
8588 	uint32_t data3;		/* error-specific data */
8589 	uint32_t bcon_time;		/* beacon timer */
8590 	uint32_t tsf_low;		/* network timestamp function timer */
8591 	uint32_t tsf_hi;		/* network timestamp function timer */
8592 	uint32_t gp1;		/* GP1 timer register */
8593 	uint32_t gp2;		/* GP2 timer register */
8594 	uint32_t fw_rev_type;	/* firmware revision type */
8595 	uint32_t major;		/* uCode version major */
8596 	uint32_t minor;		/* uCode version minor */
8597 	uint32_t hw_ver;		/* HW Silicon version */
8598 	uint32_t brd_ver;		/* HW board version */
8599 	uint32_t log_pc;		/* log program counter */
8600 	uint32_t frame_ptr;		/* frame pointer */
8601 	uint32_t stack_ptr;		/* stack pointer */
8602 	uint32_t hcmd;		/* last host command header */
8603 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8604 				 * rxtx_flag */
8605 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8606 				 * host_flag */
8607 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8608 				 * enc_flag */
8609 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8610 				 * time_flag */
8611 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8612 				 * wico interrupt */
8613 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8614 	uint32_t wait_event;		/* wait event() caller address */
8615 	uint32_t l2p_control;	/* L2pControlField */
8616 	uint32_t l2p_duration;	/* L2pDurationField */
8617 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8618 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8619 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8620 				 * (LMPM_PMG_SEL) */
8621 	uint32_t u_timestamp;	/* indicate when the date and time of the
8622 				 * compilation */
8623 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8624 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8625 
8626 /*
8627  * UMAC error struct - relevant starting from family 8000 chip.
8628  * Note: This structure is read from the device with IO accesses,
8629  * and the reading already does the endian conversion. As it is
8630  * read with u32-sized accesses, any members with a different size
8631  * need to be ordered correctly though!
8632  */
8633 struct iwx_umac_error_event_table {
8634 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8635 	uint32_t error_id;	/* type of error */
8636 	uint32_t blink1;	/* branch link */
8637 	uint32_t blink2;	/* branch link */
8638 	uint32_t ilink1;	/* interrupt link */
8639 	uint32_t ilink2;	/* interrupt link */
8640 	uint32_t data1;		/* error-specific data */
8641 	uint32_t data2;		/* error-specific data */
8642 	uint32_t data3;		/* error-specific data */
8643 	uint32_t umac_major;
8644 	uint32_t umac_minor;
8645 	uint32_t frame_pointer;	/* core register 27*/
8646 	uint32_t stack_pointer;	/* core register 28 */
8647 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8648 	uint32_t nic_isr_pref;	/* ISR status register */
8649 } __packed;
8650 
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))

/*
 * Dump the UMAC firmware error log table to the console for post-mortem
 * debugging.  The table address comes from sc_uc.uc_umac_error_event_table
 * (presumably captured from the firmware's alive response -- confirm).
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity-check the pointer before dereferencing device memory. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* Length argument is expressed in 32-bit words, not bytes. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	/*
	 * NOTE(review): this condition gates only the banner, not the
	 * register dump below, and comparing a fixed offset against
	 * valid * ERROR_ELEM_SIZE looks unusual -- verify against the
	 * upstream (OpenBSD/Linux) driver.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8698 
/* Mask off the CPU identifier bits of a firmware error id before lookup. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Firmware error-id to name mapping.  Note that two ids (0x38, 0x39)
 * both map to "BAD_COMMAND".  The final entry, ADVANCED_SYSASSERT,
 * is the catch-all returned by iwx_desc_lookup() when nothing matches.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8725 
8726 static const char *
8727 iwx_desc_lookup(uint32_t num)
8728 {
8729 	int i;
8730 
8731 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8732 		if (advanced_lookup[i].num ==
8733 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8734 			return advanced_lookup[i].name;
8735 
8736 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8737 	return advanced_lookup[i].name;
8738 }
8739 
8740 /*
8741  * Support for dumping the error log seemed like a good idea ...
8742  * but it's mostly hex junk and the only sensible thing is the
8743  * hw/ucode revision (which we know anyway).  Since it's here,
8744  * I'll just leave it in, just in case e.g. the Intel guys want to
8745  * help us decipher some "ADVANCED_SYSASSERT" later.
8746  */
8747 static void
8748 iwx_nic_error(struct iwx_softc *sc)
8749 {
8750 	struct iwx_error_event_table table;
8751 	uint32_t base;
8752 
8753 	printf("%s: dumping device error log\n", DEVNAME(sc));
8754 	printf("%s: GOS-3758: 1\n", __func__);
8755 	base = sc->sc_uc.uc_lmac_error_event_table[0];
8756 	printf("%s: GOS-3758: 2\n", __func__);
8757 	if (base < 0x400000) {
8758 		printf("%s: Invalid error log pointer 0x%08x\n",
8759 		    DEVNAME(sc), base);
8760 		return;
8761 	}
8762 
8763 	printf("%s: GOS-3758: 3\n", __func__);
8764 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8765 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8766 		return;
8767 	}
8768 
8769 	printf("%s: GOS-3758: 4\n", __func__);
8770 	if (!table.valid) {
8771 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8772 		return;
8773 	}
8774 
8775 	printf("%s: GOS-3758: 5\n", __func__);
8776 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8777 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8778 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8779 		    sc->sc_flags, table.valid);
8780 	}
8781 
8782 	printf("%s: GOS-3758: 6\n", __func__);
8783 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8784 	    iwx_desc_lookup(table.error_id));
8785 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8786 	    table.trm_hw_status0);
8787 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8788 	    table.trm_hw_status1);
8789 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8790 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8791 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8792 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8793 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8794 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8795 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8796 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8797 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8798 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8799 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8800 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8801 	    table.fw_rev_type);
8802 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8803 	    table.major);
8804 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8805 	    table.minor);
8806 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8807 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8808 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8809 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8810 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8811 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8812 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8813 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8814 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8815 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8816 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8817 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8818 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8819 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8820 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8821 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8822 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8823 
8824 	if (sc->sc_uc.uc_umac_error_event_table)
8825 		iwx_nic_umac_error(sc);
8826 }
8827 
8828 static void
8829 iwx_dump_driver_status(struct iwx_softc *sc)
8830 {
8831 	struct ieee80211com *ic = &sc->sc_ic;
8832 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8833 	enum ieee80211_state state = vap->iv_state;
8834 	int i;
8835 
8836 	printf("driver status:\n");
8837 	for (i = 0; i < nitems(sc->txq); i++) {
8838 		struct iwx_tx_ring *ring = &sc->txq[i];
8839 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
8840 		    "cur_hw=%-3d queued=%-3d\n",
8841 		    i, ring->qid, ring->cur, ring->cur_hw,
8842 		    ring->queued);
8843 	}
8844 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
8845 	printf("  802.11 state %s\n", ieee80211_state_name[state]);
8846 }
8847 
/*
 * Sync the Rx buffer holding _pkt_ for CPU reads and point _var_ at the
 * response payload, which immediately follows the packet header.
 * Relies on 'sc' and 'data' being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
8853 
8854 static int
8855 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8856 {
8857 	int qid, idx, code;
8858 
8859 	qid = pkt->hdr.qid & ~0x80;
8860 	idx = pkt->hdr.idx;
8861 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8862 
8863 	return (!(qid == 0 && idx == 0 && code == 0) &&
8864 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8865 }
8866 
8867 static void
8868 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
8869 {
8870 	struct ieee80211com *ic = &sc->sc_ic;
8871 	struct iwx_rx_packet *pkt, *nextpkt;
8872 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8873 	struct mbuf *m0, *m;
8874 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8875 	int qid, idx, code, handled = 1;
8876 
8877 	m0 = data->m;
8878 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
8879 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
8880 		qid = pkt->hdr.qid;
8881 		idx = pkt->hdr.idx;
8882 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8883 
8884 		if (!iwx_rx_pkt_valid(pkt))
8885 			break;
8886 
8887 		/*
8888 		 * XXX Intel inside (tm)
8889 		 * Any commands in the LONG_GROUP could actually be in the
8890 		 * LEGACY group. Firmware API versions >= 50 reject commands
8891 		 * in group 0, forcing us to use this hack.
8892 		 */
8893 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
8894 			struct iwx_tx_ring *ring = &sc->txq[qid];
8895 			struct iwx_tx_data *txdata = &ring->data[idx];
8896 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
8897 				code = iwx_cmd_opcode(code);
8898 		}
8899 
8900 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
8901 		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
8902 			break;
8903 
8904 		// TODO ???
8905 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8906 			/* Take mbuf m0 off the RX ring. */
8907 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
8908 				break;
8909 			}
8910 			KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
8911 		}
8912 
8913 		switch (code) {
8914 		case IWX_REPLY_RX_PHY_CMD:
8915 			/* XXX-THJ: I've not managed to hit this path in testing */
8916 			iwx_rx_rx_phy_cmd(sc, pkt, data);
8917 			break;
8918 
8919 		case IWX_REPLY_RX_MPDU_CMD: {
8920 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
8921 			nextoff = offset +
8922 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8923 			nextpkt = (struct iwx_rx_packet *)
8924 			    (m0->m_data + nextoff);
8925 			/* AX210 devices ship only one packet per Rx buffer. */
8926 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
8927 			    nextoff + minsz >= IWX_RBUF_SIZE ||
8928 			    !iwx_rx_pkt_valid(nextpkt)) {
8929 				/* No need to copy last frame in buffer. */
8930 				if (offset > 0)
8931 					m_adj(m0, offset);
8932 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
8933 				m0 = NULL; /* stack owns m0 now; abort loop */
8934 			} else {
8935 				/*
8936 				 * Create an mbuf which points to the current
8937 				 * packet. Always copy from offset zero to
8938 				 * preserve m_pkthdr.
8939 				 */
8940 				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
8941 				if (m == NULL) {
8942 					m_freem(m0);
8943 					m0 = NULL;
8944 					break;
8945 				}
8946 				m_adj(m, offset);
8947 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
8948 			}
8949 			break;
8950 		}
8951 
8952 //		case IWX_BAR_FRAME_RELEASE:
8953 //			iwx_rx_bar_frame_release(sc, pkt, ml);
8954 //			break;
8955 //
8956 		case IWX_TX_CMD:
8957 			iwx_rx_tx_cmd(sc, pkt, data);
8958 			break;
8959 
8960 		case IWX_BA_NOTIF:
8961 			iwx_rx_compressed_ba(sc, pkt);
8962 			break;
8963 
8964 		case IWX_MISSED_BEACONS_NOTIFICATION:
8965 			iwx_rx_bmiss(sc, pkt, data);
8966 			DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
8967 			    __func__));
8968 			ieee80211_beacon_miss(ic);
8969 			break;
8970 
8971 		case IWX_MFUART_LOAD_NOTIFICATION:
8972 			break;
8973 
8974 		case IWX_ALIVE: {
8975 			struct iwx_alive_resp_v4 *resp4;
8976 			struct iwx_alive_resp_v5 *resp5;
8977 			struct iwx_alive_resp_v6 *resp6;
8978 
8979 			DPRINTF(("%s: firmware alive\n", __func__));
8980 			sc->sc_uc.uc_ok = 0;
8981 
8982 			/*
8983 			 * For v5 and above, we can check the version, for older
8984 			 * versions we need to check the size.
8985 			 */
8986 			 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
8987 			    IWX_ALIVE) == 6) {
8988 				SYNC_RESP_STRUCT(resp6, pkt);
8989 				if (iwx_rx_packet_payload_len(pkt) !=
8990 				    sizeof(*resp6)) {
8991 					sc->sc_uc.uc_intr = 1;
8992 					wakeup(&sc->sc_uc);
8993 					break;
8994 				}
8995 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8996 				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8997 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8998 				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8999 				sc->sc_uc.uc_log_event_table = le32toh(
9000 				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9001 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9002 				    resp6->umac_data.dbg_ptrs.error_info_addr);
9003 				sc->sc_sku_id[0] =
9004 				    le32toh(resp6->sku_id.data[0]);
9005 				sc->sc_sku_id[1] =
9006 				    le32toh(resp6->sku_id.data[1]);
9007 				sc->sc_sku_id[2] =
9008 				    le32toh(resp6->sku_id.data[2]);
9009 				if (resp6->status == IWX_ALIVE_STATUS_OK) {
9010 					sc->sc_uc.uc_ok = 1;
9011 				}
9012 			 } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9013 			    IWX_ALIVE) == 5) {
9014 				SYNC_RESP_STRUCT(resp5, pkt);
9015 				if (iwx_rx_packet_payload_len(pkt) !=
9016 				    sizeof(*resp5)) {
9017 					sc->sc_uc.uc_intr = 1;
9018 					wakeup(&sc->sc_uc);
9019 					break;
9020 				}
9021 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9022 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9023 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9024 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9025 				sc->sc_uc.uc_log_event_table = le32toh(
9026 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9027 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9028 				    resp5->umac_data.dbg_ptrs.error_info_addr);
9029 				sc->sc_sku_id[0] =
9030 				    le32toh(resp5->sku_id.data[0]);
9031 				sc->sc_sku_id[1] =
9032 				    le32toh(resp5->sku_id.data[1]);
9033 				sc->sc_sku_id[2] =
9034 				    le32toh(resp5->sku_id.data[2]);
9035 				if (resp5->status == IWX_ALIVE_STATUS_OK)
9036 					sc->sc_uc.uc_ok = 1;
9037 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9038 				SYNC_RESP_STRUCT(resp4, pkt);
9039 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9040 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9041 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9042 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9043 				sc->sc_uc.uc_log_event_table = le32toh(
9044 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9045 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9046 				    resp4->umac_data.dbg_ptrs.error_info_addr);
9047 				if (resp4->status == IWX_ALIVE_STATUS_OK)
9048 					sc->sc_uc.uc_ok = 1;
9049 			} else
9050 				printf("unknown payload version");
9051 
9052 			sc->sc_uc.uc_intr = 1;
9053 			wakeup(&sc->sc_uc);
9054 			break;
9055 		}
9056 
9057 		case IWX_STATISTICS_NOTIFICATION: {
9058 			struct iwx_notif_statistics *stats;
9059 			SYNC_RESP_STRUCT(stats, pkt);
9060 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9061 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
9062 			break;
9063 		}
9064 
9065 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
9066 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9067 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9068 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9069 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9070 			break;
9071 
9072 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9073 		    IWX_CT_KILL_NOTIFICATION): {
9074 			struct iwx_ct_kill_notif *notif;
9075 			SYNC_RESP_STRUCT(notif, pkt);
9076 			printf("%s: device at critical temperature (%u degC), "
9077 			    "stopping device\n",
9078 			    DEVNAME(sc), le16toh(notif->temperature));
9079 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9080 			ieee80211_restart_all(ic);
9081 			break;
9082 		}
9083 
9084 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9085 		    IWX_SCD_QUEUE_CONFIG_CMD):
9086 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9087 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
9088 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9089 		    IWX_SESSION_PROTECTION_CMD):
9090 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9091 		    IWX_NVM_GET_INFO):
9092 		case IWX_ADD_STA_KEY:
9093 		case IWX_PHY_CONFIGURATION_CMD:
9094 		case IWX_TX_ANT_CONFIGURATION_CMD:
9095 		case IWX_ADD_STA:
9096 		case IWX_MAC_CONTEXT_CMD:
9097 		case IWX_REPLY_SF_CFG_CMD:
9098 		case IWX_POWER_TABLE_CMD:
9099 		case IWX_LTR_CONFIG:
9100 		case IWX_PHY_CONTEXT_CMD:
9101 		case IWX_BINDING_CONTEXT_CMD:
9102 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
9103 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
9104 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
9105 		case IWX_REPLY_BEACON_FILTERING_CMD:
9106 		case IWX_MAC_PM_POWER_TABLE:
9107 		case IWX_TIME_QUOTA_CMD:
9108 		case IWX_REMOVE_STA:
9109 		case IWX_TXPATH_FLUSH:
9110 		case IWX_BT_CONFIG:
9111 		case IWX_MCC_UPDATE_CMD:
9112 		case IWX_TIME_EVENT_CMD:
9113 		case IWX_STATISTICS_CMD:
9114 		case IWX_SCD_QUEUE_CFG: {
9115 			size_t pkt_len;
9116 
9117 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9118 				break;
9119 
9120 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
9121 			    BUS_DMASYNC_POSTREAD);
9122 
9123 			pkt_len = sizeof(pkt->len_n_flags) +
9124 			    iwx_rx_packet_len(pkt);
9125 
9126 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
9127 			    pkt_len < sizeof(*pkt) ||
9128 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
9129 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
9130 				sc->sc_cmd_resp_pkt[idx] = NULL;
9131 				break;
9132 			}
9133 
9134 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
9135 			    BUS_DMASYNC_POSTREAD);
9136 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
9137 			break;
9138 		}
9139 
9140 		case IWX_INIT_COMPLETE_NOTIF:
9141 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
9142 			wakeup(&sc->sc_init_complete);
9143 			break;
9144 
9145 		case IWX_SCAN_COMPLETE_UMAC: {
9146 			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
9147 			struct iwx_umac_scan_complete *notif __attribute__((unused));
9148 			SYNC_RESP_STRUCT(notif, pkt);
9149 			DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
9150 			    notif->status));
9151 			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
9152 			iwx_endscan(sc);
9153 			break;
9154 		}
9155 
9156 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
9157 			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
9158 			    __func__));
9159 			struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
9160 			SYNC_RESP_STRUCT(notif, pkt);
9161 			DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
9162 			    notif->status));
9163 			iwx_endscan(sc);
9164 			break;
9165 		}
9166 
9167 		case IWX_MCC_CHUB_UPDATE_CMD: {
9168 			struct iwx_mcc_chub_notif *notif;
9169 			SYNC_RESP_STRUCT(notif, pkt);
9170 			iwx_mcc_update(sc, notif);
9171 			break;
9172 		}
9173 
9174 		case IWX_REPLY_ERROR: {
9175 			struct iwx_error_resp *resp;
9176 			SYNC_RESP_STRUCT(resp, pkt);
9177 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
9178 				DEVNAME(sc), le32toh(resp->error_type),
9179 				resp->cmd_id);
9180 			break;
9181 		}
9182 
9183 		case IWX_TIME_EVENT_NOTIFICATION: {
9184 			struct iwx_time_event_notif *notif;
9185 			uint32_t action;
9186 			SYNC_RESP_STRUCT(notif, pkt);
9187 
9188 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
9189 				break;
9190 			action = le32toh(notif->action);
9191 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
9192 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9193 			break;
9194 		}
9195 
9196 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9197 		    IWX_SESSION_PROTECTION_NOTIF): {
9198 			struct iwx_session_prot_notif *notif;
9199 			uint32_t status, start, conf_id;
9200 
9201 			SYNC_RESP_STRUCT(notif, pkt);
9202 
9203 			status = le32toh(notif->status);
9204 			start = le32toh(notif->start);
9205 			conf_id = le32toh(notif->conf_id);
9206 			/* Check for end of successful PROTECT_CONF_ASSOC. */
9207 			if (status == 1 && start == 0 &&
9208 			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
9209 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9210 			break;
9211 		}
9212 
9213 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
9214 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
9215 		    break;
9216 
9217 		/*
9218 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
9219 		 * messages. Just ignore them for now.
9220 		 */
9221 		case IWX_DEBUG_LOG_MSG:
9222 			break;
9223 
9224 		case IWX_MCAST_FILTER_CMD:
9225 			break;
9226 
9227 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
9228 			break;
9229 
9230 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
9231 			break;
9232 
9233 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
9234 			break;
9235 
9236 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9237 		    IWX_NVM_ACCESS_COMPLETE):
9238 			break;
9239 
9240 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
9241 			break; /* happens in monitor mode; ignore for now */
9242 
9243 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
9244 			break;
9245 
9246 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9247 		    IWX_TLC_MNG_UPDATE_NOTIF): {
9248 			struct iwx_tlc_update_notif *notif;
9249 			SYNC_RESP_STRUCT(notif, pkt);
9250 			(void)notif;
9251 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
9252 				iwx_rs_update(sc, notif);
9253 			break;
9254 		}
9255 
9256 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
9257 			break;
9258 
9259 		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
9260 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
9261 			break;
9262 
9263 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9264 		    IWX_PNVM_INIT_COMPLETE):
9265 			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
9266 			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
9267 			wakeup(&sc->sc_init_complete);
9268 			break;
9269 
9270 		default:
9271 			handled = 0;
9272 			/* XXX wulf: Get rid of bluetooth-related spam */
9273 			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
9274 			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
9275 				break;
9276 			printf("%s: unhandled firmware response 0x%x/0x%x "
9277 			    "rx ring %d[%d]\n",
9278 			    DEVNAME(sc), code, pkt->len_n_flags,
9279 			    (qid & ~0x80), idx);
9280 			break;
9281 		}
9282 
9283 		/*
9284 		 * uCode sets bit 0x80 when it originates the notification,
9285 		 * i.e. when the notification is not a direct response to a
9286 		 * command sent by the driver.
9287 		 * For example, uCode issues IWX_REPLY_RX when it sends a
9288 		 * received frame to the driver.
9289 		 */
9290 		if (handled && !(qid & (1 << 7))) {
9291 			iwx_cmd_done(sc, qid, idx, code);
9292 		}
9293 
9294 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9295 
9296 		/* AX210 devices ship only one packet per Rx buffer. */
9297 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
9298 			break;
9299 	}
9300 
9301 	if (m0 && m0 != data->m)
9302 		m_freem(m0);
9303 }
9304 
/*
 * Service the Rx notification ring: process every buffer between our
 * current read index and the write index reported by the hardware,
 * then tell the firmware how far we have read.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
	/*
	 * NOTE(review): 'm' is passed to iwx_rx_pkt() uninitialized; that
	 * function's 'ml' argument is only referenced by commented-out
	 * code today -- confirm before enabling any path that reads it.
	 */
	struct mbuf m;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Fetch the closed receive-buffer index written back by the device. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9337 
/*
 * Legacy INTx/ICT interrupt handler, currently compiled out.
 * The MSI-X handler iwx_intr_msix() below appears to be the active
 * interrupt path -- confirm at the bus interrupt setup site.
 * Kept for reference in case non-MSI-X operation is ever needed.
 */
#if 0
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	/* Mask all interrupts while we figure out the cause. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the interrupts we are about to handle. */
	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
#endif
9466 
/*
 * MSI-X interrupt handler.
 *
 * Reads and acknowledges the FH (DMA) and HW interrupt cause registers,
 * then services Rx notifications, firmware-chunk completion, fatal
 * firmware/hardware errors, rfkill and the "alive" cause.  Only
 * vector 0 is used.  Runs with the softc lock held.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	/* Read causes, ack them by writing the same bits back. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
out:
	IWX_UNLOCK(sc);
	return;
}
9548 
9549 /*
9550  * The device info table below contains device-specific config overrides.
9551  * The most important parameter derived from this table is the name of the
9552  * firmware image to load.
9553  *
9554  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9555  * The "old" table matches devices based on PCI vendor/product IDs only.
9556  * The "new" table extends this with various device parameters derived
9557  * from MAC type, and RF type.
9558  *
9559  * In iwlwifi "old" and "new" tables share the same array, where "old"
9560  * entries contain dummy values for data defined only for "new" entries.
9561  * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9564  * entries from "old" to "new" have at times been reverted due to regressions.
9565  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9566  * devices in the same driver.
9567  *
9568  * Our table below contains mostly "new" entries declared in iwlwifi
9569  * with the _IWL_DEV_INFO() macro (with a leading underscore).
9570  * Other devices are matched based on PCI vendor/product ID as usual,
9571  * unless matching specific PCI subsystem vendor/product IDs is required.
9572  *
9573  * Some "old"-style entries are required to identify the firmware image to use.
9574  * Others might be used to print a specific marketing name into Linux dmesg,
9575  * but we can't be sure whether the corresponding devices would be matched
9576  * correctly in the absence of their entries. So we include them just in case.
9577  */
9578 
/*
 * Device-match entry: PCI device/subdevice IDs plus "new"-style
 * attributes.  Fields left as IWX_CFG_ANY by the IWX_DEV_INFO()
 * macro below act as wildcards.
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID */
	uint16_t subdevice;	/* PCI subsystem device ID */
	uint16_t mac_type;
	uint16_t rf_type;
	uint8_t mac_step;
	uint8_t rf_id;
	uint8_t no_160;		/* presumably: no 160MHz support -- confirm */
	uint8_t cores;
	uint8_t cdb;
	uint8_t jacket;
	const struct iwx_device_cfg *cfg;	/* overrides, incl. fw image name */
};
9592 
/*
 * Build a fully-specified table entry; each field may still be
 * individually wildcarded by passing IWX_CFG_ANY.
 */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/*
 * Build an "old"-style entry matching on PCI device/subdevice IDs only;
 * all other fields are wildcarded.
 */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9604 
9605 /*
9606  * When adding entries to this table keep in mind that entries must
9607  * be listed in the same order as in the Linux driver. Code walks this
9608  * table backwards and uses the first matching entry it finds.
9609  * Device firmware must be available in fw_update(8).
9610  */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/*
	 * NOTE(review): the section labels below mirror the ordering and
	 * comments of the Linux iwlwifi table; some labels (e.g. "So with
	 * HR" over ty_gf/so_gf configs) do not obviously describe the
	 * configs listed under them -- verify against iwlwifi before
	 * relying on the labels themselves.
	 */
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};
9909 
9910 static int
9911 iwx_preinit(struct iwx_softc *sc)
9912 {
9913 	struct ieee80211com *ic = &sc->sc_ic;
9914 	int err;
9915 
9916 	err = iwx_prepare_card_hw(sc);
9917 	if (err) {
9918 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9919 		return err;
9920 	}
9921 
9922 	if (sc->attached) {
9923 		return 0;
9924 	}
9925 
9926 	err = iwx_start_hw(sc);
9927 	if (err) {
9928 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9929 		return err;
9930 	}
9931 
9932 	err = iwx_run_init_mvm_ucode(sc, 1);
9933 	iwx_stop_device(sc);
9934 	if (err) {
9935 		printf("%s: failed to stop device\n", DEVNAME(sc));
9936 		return err;
9937 	}
9938 
9939 	/* Print version info and MAC address on first successful fw load. */
9940 	sc->attached = 1;
9941 	if (sc->sc_pnvm_ver) {
9942 		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
9943 		    "address %s\n",
9944 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9945 		    sc->sc_fwver, sc->sc_pnvm_ver,
9946 		    ether_sprintf(sc->sc_nvm.hw_addr));
9947 	} else {
9948 		printf("%s: hw rev 0x%x, fw %s, address %s\n",
9949 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9950 		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9951 	}
9952 
9953 	/* not all hardware can do 5GHz band */
9954 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9955 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9956 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9957 
9958 	return 0;
9959 }
9960 
/*
 * Deferred attach hook (config_intrhook callback, per sc_preinit_hook),
 * run once interrupts are enabled so that firmware can be loaded.
 *
 * Runs iwx_preinit() under the driver lock and, on success, completes
 * net80211 attachment: channel map, ieee80211_ifattach(), method table
 * overrides and radiotap setup.  The intrhook is disestablished on both
 * success and failure.
 */
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/* Firmware preinit must run under the driver lock. */
	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/* Install our net80211 methods. */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/* Interpose on A-MPDU RX handlers, saving the net80211 defaults. */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	/* Likewise for ADDBA (TX aggregation) handlers. */
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10009 
10010 const struct iwx_device_cfg *
10011 iwx_find_device_cfg(struct iwx_softc *sc)
10012 {
10013 	uint16_t sdev_id, mac_type, rf_type;
10014 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10015 	int i;
10016 
10017 	sdev_id = pci_get_subdevice(sc->sc_dev);
10018 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10019 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10020 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10021 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10022 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10023 
10024 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10025 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10026 	cores = IWX_SUBDEVICE_CORES(sdev_id);
10027 
10028 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10029 		 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10030 
10031 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10032 		    dev_info->device != sc->sc_pid)
10033 			continue;
10034 
10035 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10036 		    dev_info->subdevice != sdev_id)
10037 			continue;
10038 
10039 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10040 		    dev_info->mac_type != mac_type)
10041 			continue;
10042 
10043 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10044 		    dev_info->mac_step != mac_step)
10045 			continue;
10046 
10047 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10048 		    dev_info->rf_type != rf_type)
10049 			continue;
10050 
10051 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10052 		    dev_info->cdb != cdb)
10053 			continue;
10054 
10055 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10056 		    dev_info->jacket != jacket)
10057 			continue;
10058 
10059 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10060 		    dev_info->rf_id != rf_id)
10061 			continue;
10062 
10063 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10064 		    dev_info->no_160 != no_160)
10065 			continue;
10066 
10067 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10068 		    dev_info->cores != cores)
10069 			continue;
10070 
10071 		return dev_info->cfg;
10072 	}
10073 
10074 	return NULL;
10075 }
10076 
10077 static int
10078 iwx_probe(device_t dev)
10079 {
10080 	int i;
10081 
10082 	for (i = 0; i < nitems(iwx_devices); i++) {
10083 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10084 		    pci_get_device(dev) == iwx_devices[i].device) {
10085 			device_set_desc(dev, iwx_devices[i].name);
10086 
10087 			/*
10088 			 * Due to significant existing deployments using
10089 			 * iwlwifi lower the priority of iwx.
10090 			 *
10091 			 * This inverts the advice in bus.h where drivers
10092 			 * supporting newer hardware should return
10093 			 * BUS_PROBE_DEFAULT and drivers for older devices
10094 			 * return BUS_PROBE_LOW_PRIORITY.
10095 			 *
10096 			 */
10097 			return (BUS_PROBE_LOW_PRIORITY);
10098 		}
10099 	}
10100 
10101 	return (ENXIO);
10102 }
10103 
10104 static int
10105 iwx_attach(device_t dev)
10106 {
10107 	struct iwx_softc *sc = device_get_softc(dev);
10108 	struct ieee80211com *ic = &sc->sc_ic;
10109 	const struct iwx_device_cfg *cfg;
10110 	int err;
10111 	int txq_i, i, j;
10112 	size_t ctxt_info_size;
10113 	int rid;
10114 	int count;
10115 	int error;
10116 	sc->sc_dev = dev;
10117 	sc->sc_pid = pci_get_device(dev);
10118 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
10119 
10120 	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
10121 	IWX_LOCK_INIT(sc);
10122 	mbufq_init(&sc->sc_snd, ifqmaxlen);
10123 	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
10124 	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
10125 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
10126 	    taskqueue_thread_enqueue, &sc->sc_tq);
10127 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
10128 	if (error != 0) {
10129 		device_printf(dev, "can't start taskq thread, error %d\n",
10130 		    error);
10131 		return (ENXIO);
10132 	}
10133 
10134 	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
10135 	if (sc->sc_cap_off == 0) {
10136 		device_printf(dev, "PCIe capability structure not found!\n");
10137 		return (ENXIO);
10138 	}
10139 
10140 	/*
10141 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
10142 	 * PCI Tx retries from interfering with C3 CPU state.
10143 	 */
10144 #define	PCI_CFG_RETRY_TIMEOUT	0x41
10145 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10146 
10147 	if (pci_msix_count(dev)) {
10148 		sc->sc_msix = 1;
10149 	} else {
10150 		device_printf(dev, "no MSI-X found\n");
10151 		return (ENXIO);
10152 	}
10153 
10154 	pci_enable_busmaster(dev);
10155 	rid = PCIR_BAR(0);
10156 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
10157 	    RF_ACTIVE);
10158 	if (sc->sc_mem == NULL) {
10159 		device_printf(sc->sc_dev, "can't map mem space\n");
10160 		return (ENXIO);
10161 	}
10162 	sc->sc_st = rman_get_bustag(sc->sc_mem);
10163 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
10164 
10165 	count = 1;
10166 	rid = 0;
10167 	if (pci_alloc_msix(dev, &count) == 0)
10168 		rid = 1;
10169 	DPRINTF(("%s: count=%d\n", __func__, count));
10170 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
10171 	    (rid != 0 ? 0 : RF_SHAREABLE));
10172 	if (sc->sc_irq == NULL) {
10173 		device_printf(dev, "can't map interrupt\n");
10174 		return (ENXIO);
10175 	}
10176 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
10177 	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
10178 	if (error != 0) {
10179 		device_printf(dev, "can't establish interrupt\n");
10180 		return (ENXIO);
10181 	}
10182 
10183 	/* Clear pending interrupts. */
10184 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10185 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
10186 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
10187 
10188 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
10189 	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
10190 	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
10191 	DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));
10192 
10193 	/*
10194 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
10195 	 * changed, and now the revision step also includes bit 0-1 (no more
10196 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
10197 	 * in the old format.
10198 	 */
10199 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10200 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10201 
10202 	switch (sc->sc_pid) {
10203 	case PCI_PRODUCT_INTEL_WL_22500_1:
10204 		sc->sc_fwname = IWX_CC_A_FW;
10205 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10206 		sc->sc_integrated = 0;
10207 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10208 		sc->sc_low_latency_xtal = 0;
10209 		sc->sc_xtal_latency = 0;
10210 		sc->sc_tx_with_siso_diversity = 0;
10211 		sc->sc_uhb_supported = 0;
10212 		break;
10213 	case PCI_PRODUCT_INTEL_WL_22500_2:
10214 	case PCI_PRODUCT_INTEL_WL_22500_5:
10215 		/* These devices should be QuZ only. */
10216 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
10217 			device_printf(dev, "unsupported AX201 adapter\n");
10218 			return (ENXIO);
10219 		}
10220 		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10221 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10222 		sc->sc_integrated = 1;
10223 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10224 		sc->sc_low_latency_xtal = 0;
10225 		sc->sc_xtal_latency = 500;
10226 		sc->sc_tx_with_siso_diversity = 0;
10227 		sc->sc_uhb_supported = 0;
10228 		break;
10229 	case PCI_PRODUCT_INTEL_WL_22500_3:
10230 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10231 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
10232 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10233 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10234 		else
10235 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
10236 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10237 		sc->sc_integrated = 1;
10238 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10239 		sc->sc_low_latency_xtal = 0;
10240 		sc->sc_xtal_latency = 500;
10241 		sc->sc_tx_with_siso_diversity = 0;
10242 		sc->sc_uhb_supported = 0;
10243 		break;
10244 	case PCI_PRODUCT_INTEL_WL_22500_4:
10245 	case PCI_PRODUCT_INTEL_WL_22500_7:
10246 	case PCI_PRODUCT_INTEL_WL_22500_8:
10247 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10248 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
10249 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10250 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10251 		else
10252 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
10253 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10254 		sc->sc_integrated = 1;
10255 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
10256 		sc->sc_low_latency_xtal = 0;
10257 		sc->sc_xtal_latency = 1820;
10258 		sc->sc_tx_with_siso_diversity = 0;
10259 		sc->sc_uhb_supported = 0;
10260 		break;
10261 	case PCI_PRODUCT_INTEL_WL_22500_6:
10262 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10263 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
10264 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10265 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10266 		else
10267 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
10268 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10269 		sc->sc_integrated = 1;
10270 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10271 		sc->sc_low_latency_xtal = 1;
10272 		sc->sc_xtal_latency = 12000;
10273 		sc->sc_tx_with_siso_diversity = 0;
10274 		sc->sc_uhb_supported = 0;
10275 		break;
10276 	case PCI_PRODUCT_INTEL_WL_22500_9:
10277 	case PCI_PRODUCT_INTEL_WL_22500_10:
10278 	case PCI_PRODUCT_INTEL_WL_22500_11:
10279 	case PCI_PRODUCT_INTEL_WL_22500_13:
10280 	/* _14 is an MA device, not yet supported */
10281 	case PCI_PRODUCT_INTEL_WL_22500_15:
10282 	case PCI_PRODUCT_INTEL_WL_22500_16:
10283 		sc->sc_fwname = IWX_SO_A_GF_A_FW;
10284 		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10285 		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10286 		sc->sc_integrated = 0;
10287 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10288 		sc->sc_low_latency_xtal = 0;
10289 		sc->sc_xtal_latency = 0;
10290 		sc->sc_tx_with_siso_diversity = 0;
10291 		sc->sc_uhb_supported = 1;
10292 		break;
10293 	case PCI_PRODUCT_INTEL_WL_22500_12:
10294 	case PCI_PRODUCT_INTEL_WL_22500_17:
10295 		sc->sc_fwname = IWX_SO_A_GF_A_FW;
10296 		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10297 		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10298 		sc->sc_integrated = 1;
10299 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10300 		sc->sc_low_latency_xtal = 1;
10301 		sc->sc_xtal_latency = 12000;
10302 		sc->sc_tx_with_siso_diversity = 0;
10303 		sc->sc_uhb_supported = 0;
10304 		sc->sc_imr_enabled = 1;
10305 		break;
10306 	default:
10307 		device_printf(dev, "unknown adapter type\n");
10308 		return (ENXIO);
10309 	}
10310 
10311 	cfg = iwx_find_device_cfg(sc);
10312 	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
10313 	if (cfg) {
10314 		sc->sc_fwname = cfg->fw_name;
10315 		sc->sc_pnvm_name = cfg->pnvm_name;
10316 		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
10317 		sc->sc_uhb_supported = cfg->uhb_supported;
10318 		if (cfg->xtal_latency) {
10319 			sc->sc_xtal_latency = cfg->xtal_latency;
10320 			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
10321 		}
10322 	}
10323 
10324 	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
10325 
10326 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10327 		sc->sc_umac_prph_offset = 0x300000;
10328 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
10329 	} else
10330 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
10331 
10332 	/* Allocate DMA memory for loading firmware. */
10333 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10334 		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
10335 	else
10336 		ctxt_info_size = sizeof(struct iwx_context_info);
10337 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
10338 	    ctxt_info_size, 1);
10339 	if (err) {
10340 		device_printf(dev,
10341 		    "could not allocate memory for loading firmware\n");
10342 		return (ENXIO);
10343 	}
10344 
10345 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10346 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
10347 		    sizeof(struct iwx_prph_scratch), 1);
10348 		if (err) {
10349 			device_printf(dev,
10350 			    "could not allocate prph scratch memory\n");
10351 			goto fail1;
10352 		}
10353 
10354 		/*
10355 		 * Allocate prph information. The driver doesn't use this.
10356 		 * We use the second half of this page to give the device
10357 		 * some dummy TR/CR tail pointers - which shouldn't be
10358 		 * necessary as we don't use this, but the hardware still
10359 		 * reads/writes there and we can't let it go do that with
10360 		 * a NULL pointer.
10361 		 */
10362 		KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
10363 		    ("iwx_prph_info has wrong size"));
10364 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
10365 		    PAGE_SIZE, 1);
10366 		if (err) {
10367 			device_printf(dev,
10368 			    "could not allocate prph info memory\n");
10369 			goto fail1;
10370 		}
10371 	}
10372 
10373 	/* Allocate interrupt cause table (ICT).*/
10374 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10375 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
10376 	if (err) {
10377 		device_printf(dev, "could not allocate ICT table\n");
10378 		goto fail1;
10379 	}
10380 
10381 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10382 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10383 		if (err) {
10384 			device_printf(dev, "could not allocate TX ring %d\n",
10385 			    txq_i);
10386 			goto fail4;
10387 		}
10388 	}
10389 
10390 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
10391 	if (err) {
10392 		device_printf(sc->sc_dev, "could not allocate RX ring\n");
10393 		goto fail4;
10394 	}
10395 
10396 #ifdef IWX_DEBUG
10397 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10398 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
10399 	    CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
10400 
10401 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10402 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
10403 	    CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
10404 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10405 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
10406 	    CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
10407 
10408 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10409 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
10410 	    CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
10411 
10412 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10413 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
10414 	    CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
10415 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10416 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
10417 	    CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
10418 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10419 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
10420 	    CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
10421 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10422 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
10423 	    CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
10424 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10425 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
10426 	    CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
10427 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10428 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
10429 	    CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
10430 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10431 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
10432 	    CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
10433 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10434 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
10435 	    CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
10436 #endif
10437 	ic->ic_softc = sc;
10438 	ic->ic_name = device_get_nameunit(sc->sc_dev);
10439 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
10440 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
10441 
10442 	/* Set device capabilities. */
10443 	ic->ic_caps =
10444 	    IEEE80211_C_STA |
10445 	    IEEE80211_C_MONITOR |
10446 	    IEEE80211_C_WPA |		/* WPA/RSN */
10447 	    IEEE80211_C_WME |
10448 	    IEEE80211_C_PMGT |
10449 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
10450 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
10451 	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
10452 	    ;
10453 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
10454 
10455 	ic->ic_txstream = 2;
10456 	ic->ic_rxstream = 2;
10457 	ic->ic_htcaps |= IEEE80211_HTC_HT
10458 			| IEEE80211_HTCAP_SMPS_OFF
10459 			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
10460 			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
10461 			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
10462 			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
10463 //			| IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
10464 			| IEEE80211_HTCAP_MAXAMSDU_3839;	/* max A-MSDU length */
10465 
10466 	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
10467 
10468 	/*
10469 	 * XXX: setupcurchan() expects vhtcaps to be non-zero
10470 	 * https://bugs.freebsd.org/274156
10471 	 */
10472 	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
10473 			| IEEE80211_VHTCAP_SHORT_GI_80
10474 			| 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
10475 			| IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
10476 			| IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
10477 
10478 	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
10479 	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
10480 		  IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
10481 		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
10482 		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
10483 		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
10484 		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
10485 		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
10486 		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
10487 	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
10488 	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
10489 
10490 	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
10491 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10492 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10493 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10494 		rxba->sc = sc;
10495 		for (j = 0; j < nitems(rxba->entries); j++)
10496 			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
10497 	}
10498 
10499 	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
10500 	sc->sc_preinit_hook.ich_arg = sc;
10501 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
10502 		device_printf(dev,
10503 		    "config_intrhook_establish failed\n");
10504 		goto fail4;
10505 	}
10506 
10507 	return (0);
10508 
10509 fail4:
10510 	while (--txq_i >= 0)
10511 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10512 	iwx_free_rx_ring(sc, &sc->rxq);
10513 	if (sc->ict_dma.vaddr != NULL)
10514 		iwx_dma_contig_free(&sc->ict_dma);
10515 
10516 fail1:
10517 	iwx_dma_contig_free(&sc->ctxt_info_dma);
10518 	iwx_dma_contig_free(&sc->prph_scratch_dma);
10519 	iwx_dma_contig_free(&sc->prph_info_dma);
10520 	return (ENXIO);
10521 }
10522 
10523 static int
10524 iwx_detach(device_t dev)
10525 {
10526 	struct iwx_softc *sc = device_get_softc(dev);
10527 	int txq_i;
10528 
10529 	iwx_stop_device(sc);
10530 
10531 	taskqueue_drain_all(sc->sc_tq);
10532 	taskqueue_free(sc->sc_tq);
10533 
10534 	ieee80211_ifdetach(&sc->sc_ic);
10535 
10536 	callout_drain(&sc->watchdog_to);
10537 
10538 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
10539 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10540 	iwx_free_rx_ring(sc, &sc->rxq);
10541 
10542 	if (sc->sc_fwp != NULL) {
10543 		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
10544 		sc->sc_fwp = NULL;
10545 	}
10546 
10547 	if (sc->sc_pnvm != NULL) {
10548 		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
10549 		sc->sc_pnvm = NULL;
10550 	}
10551 
10552 	if (sc->sc_irq != NULL) {
10553 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
10554 		bus_release_resource(dev, SYS_RES_IRQ,
10555 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
10556 		pci_release_msi(dev);
10557         }
10558 	if (sc->sc_mem != NULL)
10559 		bus_release_resource(dev, SYS_RES_MEMORY,
10560 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
10561 
10562 	IWX_LOCK_DESTROY(sc);
10563 
10564 	return (0);
10565 }
10566 
10567 static void
10568 iwx_radiotap_attach(struct iwx_softc *sc)
10569 {
10570 	struct ieee80211com *ic = &sc->sc_ic;
10571 
10572 	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10573 	    "->%s begin\n", __func__);
10574 
10575 	ieee80211_radiotap_attach(ic,
10576 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10577 		IWX_TX_RADIOTAP_PRESENT,
10578 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10579 		IWX_RX_RADIOTAP_PRESENT);
10580 
10581 	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10582 	    "->%s end\n", __func__);
10583 }
10584 
10585 struct ieee80211vap *
10586 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10587     enum ieee80211_opmode opmode, int flags,
10588     const uint8_t bssid[IEEE80211_ADDR_LEN],
10589     const uint8_t mac[IEEE80211_ADDR_LEN])
10590 {
10591 	struct iwx_vap *ivp;
10592 	struct ieee80211vap *vap;
10593 
10594 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
10595 		return NULL;
10596 	ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10597 	vap = &ivp->iv_vap;
10598 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10599 	vap->iv_bmissthreshold = 10;            /* override default */
10600 	/* Override with driver methods. */
10601 	ivp->iv_newstate = vap->iv_newstate;
10602 	vap->iv_newstate = iwx_newstate;
10603 
10604 	ivp->id = IWX_DEFAULT_MACID;
10605 	ivp->color = IWX_DEFAULT_COLOR;
10606 
10607 	ivp->have_wme = TRUE;
10608 	ivp->ps_disabled = FALSE;
10609 
10610 	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10611 	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10612 
10613 	/* h/w crypto support */
10614 	vap->iv_key_alloc = iwx_key_alloc;
10615 	vap->iv_key_delete = iwx_key_delete;
10616 	vap->iv_key_set = iwx_key_set;
10617 	vap->iv_key_update_begin = iwx_key_update_begin;
10618 	vap->iv_key_update_end = iwx_key_update_end;
10619 
10620 	ieee80211_ratectl_init(vap);
10621 	/* Complete setup. */
10622 	ieee80211_vap_attach(vap, ieee80211_media_change,
10623 	    ieee80211_media_status, mac);
10624 	ic->ic_opmode = opmode;
10625 
10626 	return vap;
10627 }
10628 
10629 static void
10630 iwx_vap_delete(struct ieee80211vap *vap)
10631 {
10632 	struct iwx_vap *ivp = IWX_VAP(vap);
10633 
10634 	ieee80211_ratectl_deinit(vap);
10635 	ieee80211_vap_detach(vap);
10636 	free(ivp, M_80211_VAP);
10637 }
10638 
10639 static void
10640 iwx_parent(struct ieee80211com *ic)
10641 {
10642 	struct iwx_softc *sc = ic->ic_softc;
10643 	IWX_LOCK(sc);
10644 
10645 	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10646 		iwx_stop(sc);
10647 		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10648 	} else {
10649 		iwx_init(sc);
10650 		ieee80211_start_all(ic);
10651 	}
10652 	IWX_UNLOCK(sc);
10653 }
10654 
10655 static int
10656 iwx_suspend(device_t dev)
10657 {
10658 	struct iwx_softc *sc = device_get_softc(dev);
10659 
10660 	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10661 		iwx_stop(sc);
10662 		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10663 	}
10664 	return (0);
10665 }
10666 
10667 static int
10668 iwx_resume(device_t dev)
10669 {
10670 	struct iwx_softc *sc = device_get_softc(dev);
10671 	int err;
10672 
10673 	err = iwx_start_hw(sc);
10674 	if (err) {
10675 		return err;
10676 	}
10677 
10678 	err = iwx_init_hw(sc);
10679 	if (err) {
10680 		iwx_stop_device(sc);
10681 		return err;
10682 	}
10683 
10684 	ieee80211_start_all(&sc->sc_ic);
10685 
10686 	return (0);
10687 }
10688 
10689 static void
10690 iwx_scan_start(struct ieee80211com *ic)
10691 {
10692 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10693 	struct iwx_softc *sc = ic->ic_softc;
10694 	int err;
10695 
10696 	IWX_LOCK(sc);
10697 	if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10698 		err = iwx_scan(sc);
10699 	else
10700 		err = iwx_bgscan(ic);
10701 	IWX_UNLOCK(sc);
10702 	if (err)
10703 		ieee80211_cancel_scan(vap);
10704 
10705 	return;
10706 }
10707 
/*
 * net80211 multicast filter update callback.  Intentionally empty:
 * the driver does not program a multicast filter.
 */
static void
iwx_update_mcast(struct ieee80211com *ic)
{
}
10712 
/*
 * Per-channel scan dwell callback.  Empty: scanning is offloaded to
 * the firmware (IEEE80211_FEXT_SCAN_OFFLOAD is set at attach), so
 * net80211 does not drive individual channel visits.
 */
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
10717 
/*
 * Minimum-dwell-reached scan callback.  Empty for the same reason as
 * iwx_scan_curchan(): the firmware performs the scan autonomously.
 */
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
10722 
/* net80211 scan-end callback: forwards to iwx_endscan(). */
static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}
10728 
/*
 * Channel-change callback.  Currently a no-op; the compiled-out code
 * below hints at a future implementation via iwx_phy_ctxt_task().
 */
static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
        struct iwx_softc *sc = ic->ic_softc;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

        IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
        iwx_phy_ctxt_task((void *)sc);
#endif
}
10740 
10741 static void
10742 iwx_endscan_cb(void *arg, int pending)
10743 {
10744 	struct iwx_softc *sc = arg;
10745 	struct ieee80211com *ic = &sc->sc_ic;
10746 
10747 	DPRINTF(("scan ended\n"));
10748 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10749 }
10750 
/*
 * WME/EDCA parameter update callback.  No-op that reports success;
 * parameters are not pushed to the firmware here.
 * NOTE(review): confirm firmware defaults make this acceptable.
 */
static int
iwx_wme_update(struct ieee80211com *ic)
{
	return 0;
}
10756 
10757 static int
10758 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10759     const struct ieee80211_bpf_params *params)
10760 {
10761 	struct ieee80211com *ic = ni->ni_ic;
10762 	struct iwx_softc *sc = ic->ic_softc;
10763 	int err;
10764 
10765 	IWX_LOCK(sc);
10766 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10767 		err = iwx_tx(sc, m, ni);
10768 		IWX_UNLOCK(sc);
10769 		return err;
10770 	} else {
10771 		IWX_UNLOCK(sc);
10772 		return EIO;
10773 	}
10774 }
10775 
10776 static int
10777 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10778 {
10779 	struct iwx_softc *sc = ic->ic_softc;
10780 	int error;
10781 
10782 	// TODO: mbufq_enqueue in iwm
10783 	// TODO dequeue in iwm_start, counters, locking
10784 	IWX_LOCK(sc);
10785 	error = mbufq_enqueue(&sc->sc_snd, m);
10786 	if (error) {
10787 		IWX_UNLOCK(sc);
10788 		return (error);
10789 	}
10790 
10791 	iwx_start(sc);
10792 	IWX_UNLOCK(sc);
10793 	return (0);
10794 }
10795 
10796 static int
10797 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10798     int baparamset, int batimeout, int baseqctl)
10799 {
10800 	struct ieee80211com *ic = ni->ni_ic;
10801 	struct iwx_softc *sc = ic->ic_softc;
10802 	int tid;
10803 
10804 	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10805 	sc->ni_rx_ba[tid].ba_winstart =
10806 	    _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10807 	sc->ni_rx_ba[tid].ba_winsize =
10808 	    _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10809 	sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10810 
10811 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10812 	    tid >= IWX_MAX_TID_COUNT)
10813 		return ENOSPC;
10814 
10815 	if (sc->ba_rx.start_tidmask & (1 << tid)) {
10816 		DPRINTF(("%s: tid %d already added\n", __func__, tid));
10817 		return EBUSY;
10818 	}
10819 	DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10820 
10821 	sc->ba_rx.start_tidmask |= (1 << tid);
10822 	DPRINTF(("%s: tid=%i\n", __func__, tid));
10823 	DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10824 	DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10825 	DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10826 
10827 	taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10828 
10829 	// TODO:misha move to ba_task (serialize)
10830 	sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10831 
10832 	return (0);
10833 }
10834 
/*
 * RX A-MPDU stop callback.  No-op.
 * NOTE(review): the state set up in iwx_ampdu_rx_start() is not torn
 * down here -- confirm cleanup happens elsewhere (e.g. the ba_rx task).
 */
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	return;
}
10840 
10841 static int
10842 iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
10843     int dialogtoken, int baparamset, int batimeout)
10844 {
10845 	struct iwx_softc *sc = ni->ni_ic->ic_softc;
10846 	int tid;
10847 
10848 	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10849 	DPRINTF(("%s: tid=%i\n", __func__, tid));
10850 	sc->ba_tx.start_tidmask |= (1 << tid);
10851 	taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
10852 	return 0;
10853 }
10854 
10855 
/*
 * ADDBA response callback.  No-op returning success; TX aggregation
 * setup is driven from iwx_addba_request() via the ba_tx task.
 */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return 0;
}
10862 
/* Key-update bracketing callback (begin); nothing to do. */
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	return;
}
10868 
/* Key-update bracketing callback (end); nothing to do. */
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	return;
}
10874 
10875 static int
10876 iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
10877 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
10878 {
10879 
10880 	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
10881 		return 1;
10882 	}
10883 	if (!(&vap->iv_nw_keys[0] <= k &&
10884 	     k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
10885 		/*
10886 		 * Not in the global key table, the driver should handle this
10887 		 * by allocating a slot in the h/w key table/cache.  In
10888 		 * lieu of that return key slot 0 for any unicast key
10889 		 * request.  We disallow the request if this is a group key.
10890 		 * This default policy does the right thing for legacy hardware
10891 		 * with a 4 key table.  It also handles devices that pass
10892 		 * packets through untouched when marked with the WEP bit
10893 		 * and key index 0.
10894 		 */
10895 		if (k->wk_flags & IEEE80211_KEY_GROUP)
10896 			return 0;
10897 		*keyix = 0;	/* NB: use key index 0 for ucast key */
10898 	} else {
10899 		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
10900 	}
10901 	*rxkeyix = IEEE80211_KEYIX_NONE;	/* XXX maybe *keyix? */
10902 	return 1;
10903 }
10904 
10905 static int
10906 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
10907 {
10908 	struct ieee80211com *ic = vap->iv_ic;
10909 	struct iwx_softc *sc = ic->ic_softc;
10910 	struct iwx_add_sta_key_cmd cmd;
10911 	uint32_t status;
10912 	int err;
10913 	int id;
10914 
10915 	if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
10916 		return 1;
10917 	}
10918 
10919 	IWX_LOCK(sc);
10920 	/*
10921 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
10922 	 * Currently we only implement station mode where 'ni' is always
10923 	 * ic->ic_bss so there is no need to validate arguments beyond this:
10924 	 */
10925 
10926 	memset(&cmd, 0, sizeof(cmd));
10927 
10928 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
10929 		DPRINTF(("%s: adding group key\n", __func__));
10930 	} else {
10931 		DPRINTF(("%s: adding key\n", __func__));
10932 	}
10933 	if (k >= &vap->iv_nw_keys[0] &&
10934 	    k <  &vap->iv_nw_keys[IEEE80211_WEP_NKID])
10935 		id = (k - vap->iv_nw_keys);
10936 	else
10937 		id = (0);
10938 	DPRINTF(("%s: setting keyid=%i\n", __func__, id));
10939 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
10940 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
10941 	    ((id << IWX_STA_KEY_FLG_KEYID_POS) &
10942 	    IWX_STA_KEY_FLG_KEYID_MSK));
10943 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
10944 		cmd.common.key_offset = 1;
10945 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
10946 	} else {
10947 		cmd.common.key_offset = 0;
10948 	}
10949 	memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
10950 	    k->wk_keylen));
10951 	DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
10952 	for (int i=0; i<k->wk_keylen; i++) {
10953 		DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
10954 	}
10955 	cmd.common.sta_id = IWX_STATION_ID;
10956 
10957 	cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
10958 	DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));
10959 
10960 	status = IWX_ADD_STA_SUCCESS;
10961 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
10962 	    &status);
10963 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
10964 		err = EIO;
10965 	if (err) {
10966 		printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
10967 		IWX_UNLOCK(sc);
10968 		return err;
10969 	} else
10970 		DPRINTF(("%s: key added successfully\n", __func__));
10971 	IWX_UNLOCK(sc);
10972 	return 1;
10973 }
10974 
/*
 * Key delete callback.  Returns 1 ("handled") without removing the key
 * from the firmware.
 * NOTE(review): confirm stale hardware keys are harmless after
 * disassociation/rekey.
 */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return 1;
}
10980 
/* newbus device interface glue. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwx_probe),
	DEVMETHOD(device_attach,	iwx_attach),
	DEVMETHOD(device_detach,	iwx_detach),
	DEVMETHOD(device_suspend,	iwx_suspend),
	DEVMETHOD(device_resume,	iwx_resume),

	DEVMETHOD_END
};
10991 
/* Driver declaration consumed by DRIVER_MODULE() below. */
static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};
10997 
/* Register the driver on the PCI bus. */
DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
/* PNP matching data (Intel vendor ID) so devmatch(8) can autoload iwx. */
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
/* Required companion modules: firmware loader, PCI bus, net80211. */
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11004