xref: /freebsd/sys/dev/iwx/if_iwx.c (revision 2ad0f7e91582dde5475ceb1a1942930549e5c628)
1 /*-
2  * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3  */
4 
5 /*	$OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $	*/
6 
7 /*
8  *
9  * Copyright (c) 2025 The FreeBSD Foundation
10  *
11  * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12  * under sponsorship from the FreeBSD Foundation.
13  *
14  * Permission to use, copy, modify, and distribute this software for any
15  * purpose with or without fee is hereby granted, provided that the above
16  * copyright notice and this permission notice appear in all copies.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25  *
26  */
27 
28 /*-
29  * Copyright (c) 2024 Future Crew, LLC
30  *   Author: Mikhail Pchelin <misha@FreeBSD.org>
31  *
32  * Permission to use, copy, modify, and distribute this software for any
33  * purpose with or without fee is hereby granted, provided that the above
34  * copyright notice and this permission notice appear in all copies.
35  *
36  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43  */
44 
45 /*
46  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47  *   Author: Stefan Sperling <stsp@openbsd.org>
48  * Copyright (c) 2014 Fixup Software Ltd.
49  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50  *
51  * Permission to use, copy, modify, and distribute this software for any
52  * purpose with or without fee is hereby granted, provided that the above
53  * copyright notice and this permission notice appear in all copies.
54  *
55  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62  */
63 
64 /*-
65  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66  * which were used as the reference documentation for this implementation.
67  *
68  ******************************************************************************
69  *
70  * This file is provided under a dual BSD/GPLv2 license.  When using or
71  * redistributing this file, you may do so under either license.
72  *
73  * GPL LICENSE SUMMARY
74  *
75  * Copyright(c) 2017 Intel Deutschland GmbH
76  * Copyright(c) 2018 - 2019 Intel Corporation
77  *
78  * This program is free software; you can redistribute it and/or modify
79  * it under the terms of version 2 of the GNU General Public License as
80  * published by the Free Software Foundation.
81  *
82  * This program is distributed in the hope that it will be useful, but
83  * WITHOUT ANY WARRANTY; without even the implied warranty of
84  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
85  * General Public License for more details.
86  *
87  * BSD LICENSE
88  *
89  * Copyright(c) 2017 Intel Deutschland GmbH
90  * Copyright(c) 2018 - 2019 Intel Corporation
91  * All rights reserved.
92  *
93  * Redistribution and use in source and binary forms, with or without
94  * modification, are permitted provided that the following conditions
95  * are met:
96  *
97  *  * Redistributions of source code must retain the above copyright
98  *    notice, this list of conditions and the following disclaimer.
99  *  * Redistributions in binary form must reproduce the above copyright
100  *    notice, this list of conditions and the following disclaimer in
101  *    the documentation and/or other materials provided with the
102  *    distribution.
103  *  * Neither the name Intel Corporation nor the names of its
104  *    contributors may be used to endorse or promote products derived
105  *    from this software without specific prior written permission.
106  *
107  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118  *
119  *****************************************************************************
120  */
121 
122 /*-
123  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124  *
125  * Permission to use, copy, modify, and distribute this software for any
126  * purpose with or without fee is hereby granted, provided that the above
127  * copyright notice and this permission notice appear in all copies.
128  *
129  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136  */
137 
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157 
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161 
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164 
165 #include <net/bpf.h>
166 
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171 
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174 
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180 
/*
 * Tx flow-control watermarks (frame counts).  NOTE(review): presumably
 * transmission is throttled when a queue fills to iwx_himark and resumed
 * at iwx_lomark — confirm against the queue-handling code.
 */
int iwx_himark = 224;
int iwx_lomark = 192;

/* Firmware response structure versions handled by this driver. */
#define IWX_FBSD_RSP_V3 3
#define IWX_FBSD_RSP_V4 4

/* Device name string, e.g. "iwx0", for diagnostics. */
#define DEVNAME(_sc)    (device_get_nameunit((_sc)->sc_dev))
/* ifnet of the first (only) vap hanging off the ieee80211com. */
#define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)

/* Dereference a pointer to a little-endian 16/32-bit value in host order. */
#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192 
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195 
196 #include <dev/iwx/if_iwx_debug.h>
197 
/* PCI vendor/device IDs recognized by this driver (matched in iwx_probe()). */
#define PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_22500_1	0x2723		/* Wi-Fi 6 AX200 */
#define	PCI_PRODUCT_INTEL_WL_22500_2	0x02f0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_3	0xa0f0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_4	0x34f0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_5	0x06f0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_6	0x43f0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_7	0x3df0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_8	0x4df0		/* Wi-Fi 6 AX201 */
#define	PCI_PRODUCT_INTEL_WL_22500_9	0x2725		/* Wi-Fi 6 AX210 */
#define	PCI_PRODUCT_INTEL_WL_22500_10	0x2726		/* Wi-Fi 6 AX211 */
#define	PCI_PRODUCT_INTEL_WL_22500_11	0x51f0		/* Wi-Fi 6 AX211 */
#define	PCI_PRODUCT_INTEL_WL_22500_12	0x7a70		/* Wi-Fi 6 AX211 */
#define	PCI_PRODUCT_INTEL_WL_22500_13	0x7af0		/* Wi-Fi 6 AX211 */
#define	PCI_PRODUCT_INTEL_WL_22500_14	0x7e40		/* Wi-Fi 6 AX210 */
#define	PCI_PRODUCT_INTEL_WL_22500_15	0x7f70		/* Wi-Fi 6 AX211 */
#define	PCI_PRODUCT_INTEL_WL_22500_16	0x54f0		/* Wi-Fi 6 AX211 */
#define	PCI_PRODUCT_INTEL_WL_22500_17	0x51f1		/* Wi-Fi 6 AX211 */

/* Table mapping supported PCI device IDs to human-readable adapter names. */
static const struct iwx_devices {
	uint16_t		device;	/* PCI device ID */
	char			*name;	/* marketing name reported at probe */
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1,		"Wi-Fi 6 AX200"	},
	{ PCI_PRODUCT_INTEL_WL_22500_2,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_3,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_4,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_5,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_6,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_7,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_8,		"Wi-Fi 6 AX201"	},
	{ PCI_PRODUCT_INTEL_WL_22500_9,		"Wi-Fi 6 AX210"	},
	{ PCI_PRODUCT_INTEL_WL_22500_10,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_11,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_12,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_13,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_14,	"Wi-Fi 6 AX210"	},
	{ PCI_PRODUCT_INTEL_WL_22500_15,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_16,	"Wi-Fi 6 AX211"	},
	{ PCI_PRODUCT_INTEL_WL_22500_17,	"Wi-Fi 6 AX211"	},
};
239 
/*
 * Channel numbers advertised for 8000-series style NVM layouts:
 * 14 2.4 GHz channels followed by 37 5 GHz channels.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/*
 * Channel list for ultra-high-band (UHB) capable devices: the same
 * 2.4/5 GHz set as above plus the 6-7 GHz channel numbers.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

/* Number of 2.4 GHz and 5 GHz entries in the channel tables above. */
#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37
265 
/*
 * Rate table mapping net80211 rate codes (units of 500 kb/s; e.g. 2 = 1 Mb/s)
 * to firmware PLCP signal codes for legacy and HT operation.  Entries whose
 * legacy or HT code is an *_INV/_INVM placeholder have no encoding in that
 * mode (e.g. CCK rates have no HT MCS, MIMO2 MCS rates have no legacy code).
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kb/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP code */
	uint8_t ht_plcp;	/* HT MCS PLCP code */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
/* iwx_rates[] index of the first CCK entry and the first OFDM entry. */
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
/* True when a 500 kb/s rate VALUE is OFDM (>= 6 Mb/s and not CCK 11 Mb/s). */
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
320 
321 static uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
322 static uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
323 static int	iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
324 #if 0
325 static int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
326 static int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
327 #endif
328 static int	iwx_apply_debug_destination(struct iwx_softc *);
329 static void	iwx_set_ltr(struct iwx_softc *);
330 static int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
331 static int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
332 	    const struct iwx_fw_sects *);
333 static void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
334 static void	iwx_ctxt_info_free_paging(struct iwx_softc *);
335 static int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
336 	    struct iwx_context_info_dram *);
337 static void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
338 static int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
339 	    const uint8_t *, size_t);
340 static int	iwx_set_default_calib(struct iwx_softc *, const void *);
341 static void	iwx_fw_info_free(struct iwx_fw_info *);
342 static int	iwx_read_firmware(struct iwx_softc *);
343 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
344 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
345 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
346 static void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
347 static void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
348 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
349 static void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
351 static int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
352 static int	iwx_nic_lock(struct iwx_softc *);
353 static void	iwx_nic_assert_locked(struct iwx_softc *);
354 static void	iwx_nic_unlock(struct iwx_softc *);
355 static int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
356 	    uint32_t);
357 static int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
358 static int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
359 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
360 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
361     bus_size_t, bus_size_t);
362 static void iwx_dma_contig_free(struct iwx_dma_info *);
363 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
364 static void	iwx_disable_rx_dma(struct iwx_softc *);
365 static void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
367 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
368 static void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
369 static void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
370 static void	iwx_enable_rfkill_int(struct iwx_softc *);
371 static int	iwx_check_rfkill(struct iwx_softc *);
372 static void	iwx_enable_interrupts(struct iwx_softc *);
373 static void	iwx_enable_fwload_interrupt(struct iwx_softc *);
374 #if 0
375 static void	iwx_restore_interrupts(struct iwx_softc *);
376 #endif
377 static void	iwx_disable_interrupts(struct iwx_softc *);
378 static void	iwx_ict_reset(struct iwx_softc *);
379 static int	iwx_set_hw_ready(struct iwx_softc *);
380 static int	iwx_prepare_card_hw(struct iwx_softc *);
381 static int	iwx_force_power_gating(struct iwx_softc *);
382 static void	iwx_apm_config(struct iwx_softc *);
383 static int	iwx_apm_init(struct iwx_softc *);
384 static void	iwx_apm_stop(struct iwx_softc *);
385 static int	iwx_allow_mcast(struct iwx_softc *);
386 static void	iwx_init_msix_hw(struct iwx_softc *);
387 static void	iwx_conf_msix_hw(struct iwx_softc *, int);
388 static int	iwx_clear_persistence_bit(struct iwx_softc *);
389 static int	iwx_start_hw(struct iwx_softc *);
390 static void	iwx_stop_device(struct iwx_softc *);
391 static void	iwx_nic_config(struct iwx_softc *);
392 static int	iwx_nic_rx_init(struct iwx_softc *);
393 static int	iwx_nic_init(struct iwx_softc *);
394 static int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
395 static int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
396 static void	iwx_post_alive(struct iwx_softc *);
397 static int	iwx_schedule_session_protection(struct iwx_softc *,
398     struct iwx_node *, uint32_t);
399 static void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
400 static void	iwx_init_channel_map(struct ieee80211com *, int, int *,
401     struct ieee80211_channel[]);
402 static int	iwx_mimo_enabled(struct iwx_softc *);
403 static void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
404 	    uint16_t);
405 static void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
406 static void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
407 	    uint16_t, uint16_t, int, int);
408 static void	iwx_sta_tx_agg_start(struct iwx_softc *,
409     struct ieee80211_node *, uint8_t);
410 static void	iwx_ba_rx_task(void *, int);
411 static void	iwx_ba_tx_task(void *, int);
412 static void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
413 static int	iwx_is_valid_mac_addr(const uint8_t *);
414 static void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
415 static int	iwx_nvm_get(struct iwx_softc *);
416 static int	iwx_load_firmware(struct iwx_softc *);
417 static int	iwx_start_fw(struct iwx_softc *);
418 static int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
419 static int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
420 static void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
421 static int	iwx_load_pnvm(struct iwx_softc *);
422 static int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
423 static int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
424 static int	iwx_load_ucode_wait_alive(struct iwx_softc *);
425 static int	iwx_send_dqa_cmd(struct iwx_softc *);
426 static int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
427 static int	iwx_config_ltr(struct iwx_softc *);
428 static void 	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
429 static int 	iwx_rx_addbuf(struct iwx_softc *, int, int);
430 static int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
431 static void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
432     struct iwx_rx_data *);
433 static int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
434 static int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
435 #if 0
436 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
437 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
438 #endif
439 static void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
440     int, int, uint32_t, uint8_t);
441 static void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
442 static void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
443     struct iwx_tx_data *);
444 static void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
445 static void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
446 	    struct iwx_rx_data *);
447 static void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
448 static void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
449     struct iwx_rx_data *);
450 static int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
451 static uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
452 static int	iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
453     struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
454 #if 0
455 static int	iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
456     uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
457 #endif
458 static int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
459     uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
460 static int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
461 static int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
462 	    const void *);
463 static int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
464 	    uint32_t *);
465 static int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
466 	    const void *, uint32_t *);
467 static void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
468 static void	iwx_cmd_done(struct iwx_softc *, int, int, int);
469 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
470 static uint32_t iwx_fw_rateidx_cck(uint8_t);
471 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
472     struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
473     struct mbuf *);
474 static void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
475 	    uint16_t, uint16_t);
476 static int	iwx_tx(struct iwx_softc *, struct mbuf *,
477     struct ieee80211_node *);
478 static int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
479 static int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
480 static int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
481 static int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
482 	    struct iwx_beacon_filter_cmd *);
483 static int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
484     int);
485 static void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
486 	    struct iwx_mac_power_cmd *);
487 static int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
488 static int	iwx_power_update_device(struct iwx_softc *);
489 #if 0
490 static int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
491 #endif
492 static int	iwx_disable_beacon_filter(struct iwx_softc *);
493 static int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
494 static int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
495 static int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
496 static int	iwx_fill_probe_req(struct iwx_softc *,
497     struct iwx_scan_probe_req *);
498 static int	iwx_config_umac_scan_reduced(struct iwx_softc *);
499 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
500 static void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
501 	    struct iwx_scan_general_params_v10 *, int);
502 static void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
503 	    struct iwx_scan_general_params_v10 *, uint16_t, int);
504 static void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
505 	    struct iwx_scan_channel_params_v6 *, uint32_t, int);
506 static int	iwx_umac_scan_v14(struct iwx_softc *, int);
507 static void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
508 static uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
509 static int	iwx_rval2ridx(int);
510 static void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
511     int *);
512 static void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
513 	    struct iwx_mac_ctx_cmd *, uint32_t);
514 static void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
515 	    struct iwx_mac_data_sta *, int);
516 static int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
517     uint32_t, int);
518 static int	iwx_clear_statistics(struct iwx_softc *);
519 static int	iwx_scan(struct iwx_softc *);
520 static int	iwx_bgscan(struct ieee80211com *);
521 static int	iwx_enable_mgmt_queue(struct iwx_softc *);
522 static int	iwx_disable_mgmt_queue(struct iwx_softc *);
523 static int	iwx_rs_rval2idx(uint8_t);
524 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
525     int);
526 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
527 static int	iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
528 static int	iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
529 static int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
530 static int	iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
531 	    uint8_t, uint8_t);
532 static int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
533 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
534 	    uint8_t);
535 static int	iwx_auth(struct ieee80211vap *, struct iwx_softc *);
536 static int	iwx_deauth(struct iwx_softc *);
537 static int	iwx_run(struct ieee80211vap *, struct iwx_softc *);
538 static int	iwx_run_stop(struct iwx_softc *);
539 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
540     const uint8_t[IEEE80211_ADDR_LEN]);
541 #if 0
542 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
543 	    struct ieee80211_key *);
544 void	iwx_setkey_task(void *);
545 void	iwx_delete_key(struct ieee80211com *,
546 	    struct ieee80211_node *, struct ieee80211_key *);
547 #endif
548 static int	iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
549 static void	iwx_endscan(struct iwx_softc *);
550 static void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
551 	    struct ieee80211_node *);
552 static int	iwx_sf_config(struct iwx_softc *, int);
553 static int	iwx_send_bt_init_conf(struct iwx_softc *);
554 static int	iwx_send_soc_conf(struct iwx_softc *);
555 static int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
556 static int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
557 static int	iwx_init_hw(struct iwx_softc *);
558 static int	iwx_init(struct iwx_softc *);
559 static void	iwx_stop(struct iwx_softc *);
560 static void	iwx_watchdog(void *);
561 static const char *iwx_desc_lookup(uint32_t);
562 static void	iwx_nic_error(struct iwx_softc *);
563 static void	iwx_dump_driver_status(struct iwx_softc *);
564 static void	iwx_nic_umac_error(struct iwx_softc *);
565 static void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
566 static int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
567 static void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
568 	    struct mbuf *);
569 static void	iwx_notif_intr(struct iwx_softc *);
570 #if 0
571 /* XXX-THJ - I don't have hardware for this */
572 static int	iwx_intr(void *);
573 #endif
574 static void	iwx_intr_msix(void *);
575 static int	iwx_preinit(struct iwx_softc *);
576 static void	iwx_attach_hook(void *);
577 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
578 static int	iwx_probe(device_t);
579 static int	iwx_attach(device_t);
580 static int	iwx_detach(device_t);
581 
/* FreeBSD specific glue */

/* All-ones (broadcast) MAC address, provided for OpenBSD-derived code. */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zeroes MAC address — presumably used as a wildcard; confirm at callers. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

/*
 * Debug printf: emits only when sc->sc_debug == IWX_DEBUG_ANY.
 * NB: references a variable named "sc" from the caller's scope, so it is
 * only usable inside functions with a softc pointer called sc.
 */
#if IWX_DEBUG
#define DPRINTF(x)	do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
#else
#define DPRINTF(x)	do { ; } while (0)
#endif
594 
595 /* FreeBSD specific functions */
596 static struct	ieee80211vap * iwx_vap_create(struct ieee80211com *,
597     const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
598     const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
599 static void	iwx_vap_delete(struct ieee80211vap *);
600 static void	iwx_parent(struct ieee80211com *);
601 static void	iwx_scan_start(struct ieee80211com *);
602 static void	iwx_scan_end(struct ieee80211com *);
603 static void	iwx_update_mcast(struct ieee80211com *ic);
604 static void	iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
605 static void	iwx_scan_mindwell(struct ieee80211_scan_state *);
606 static void	iwx_set_channel(struct ieee80211com *);
607 static void	iwx_endscan_cb(void *, int );
608 static int	iwx_wme_update(struct ieee80211com *);
609 static int	iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
610     const struct ieee80211_bpf_params *);
611 static int	iwx_transmit(struct ieee80211com *, struct mbuf *);
612 static void	iwx_start(struct iwx_softc *);
613 static int	iwx_ampdu_rx_start(struct ieee80211_node *,
614     struct ieee80211_rx_ampdu *, int, int, int);
615 static void	iwx_ampdu_rx_stop(struct ieee80211_node *,
616     struct ieee80211_rx_ampdu *);
617 static int	iwx_addba_request(struct ieee80211_node *,
618     struct ieee80211_tx_ampdu *, int, int, int);
619 static int	iwx_addba_response(struct ieee80211_node *,
620     struct ieee80211_tx_ampdu *, int, int, int);
621 static void	iwx_key_update_begin(struct ieee80211vap *);
622 static void	iwx_key_update_end(struct ieee80211vap *);
623 static int	iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
624     ieee80211_keyix *,ieee80211_keyix *);
625 static int	iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
626 static int	iwx_key_delete(struct ieee80211vap *,
627     const struct ieee80211_key *);
628 static int	iwx_suspend(device_t);
629 static int	iwx_resume(device_t);
630 static void	iwx_radiotap_attach(struct iwx_softc *);
631 
/* OpenBSD compat defines */
/* Secondary channel offset "none" (HT20 operation). */
#define IEEE80211_HTOP0_SCO_SCN 0
/* VHT operation channel-width codes as used by OpenBSD's net80211. */
#define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
#define IEEE80211_VHTOP0_CHAN_WIDTH_80 1

/* HT rateset selectors: single stream (SISO) vs. two-stream MIMO. */
#define IEEE80211_HT_RATESET_SISO 0
#define IEEE80211_HT_RATESET_MIMO2 2

/* Standard legacy rate sets; rate values are in 500 kb/s units. */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
648 
649 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)650 ieee80211_has_addr4(const struct ieee80211_frame *wh)
651 {
652 	return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
653 	    IEEE80211_FC1_DIR_DSTODS;
654 }
655 
656 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)657 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
658 {
659 	const struct iwx_fw_cmd_version *entry;
660 	int i;
661 
662 	for (i = 0; i < sc->n_cmd_versions; i++) {
663 		entry = &sc->cmd_versions[i];
664 		if (entry->group == grp && entry->cmd == cmd)
665 			return entry->cmd_ver;
666 	}
667 
668 	return IWX_FW_CMD_VER_UNKNOWN;
669 }
670 
671 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)672 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
673 {
674 	const struct iwx_fw_cmd_version *entry;
675 	int i;
676 
677 	for (i = 0; i < sc->n_cmd_versions; i++) {
678 		entry = &sc->cmd_versions[i];
679 		if (entry->group == grp && entry->cmd == cmd)
680 			return entry->notif_ver;
681 	}
682 
683 	return IWX_FW_CMD_VER_UNKNOWN;
684 }
685 
686 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)687 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
688 {
689 	const struct iwx_fw_cscheme_list *l = (const void *)data;
690 
691 	if (dlen < sizeof(*l) ||
692 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
693 		return EINVAL;
694 
695 	/* we don't actually store anything for now, always use s/w crypto */
696 
697 	return 0;
698 }
699 
700 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)701 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
702     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
703 {
704 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
705 	if (err) {
706 		printf("%s: could not allocate context info DMA memory\n",
707 		    DEVNAME(sc));
708 		return err;
709 	}
710 
711 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
712 
713 	return 0;
714 }
715 
716 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)717 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
718 {
719 	struct iwx_self_init_dram *dram = &sc->init_dram;
720 	int i;
721 
722 	if (!dram->paging)
723 		return;
724 
725 	/* free paging*/
726 	for (i = 0; i < dram->paging_cnt; i++)
727 		iwx_dma_contig_free(&dram->paging[i]);
728 
729 	free(dram->paging, M_DEVBUF);
730 	dram->paging_cnt = 0;
731 	dram->paging = NULL;
732 }
733 
734 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)735 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
736 {
737 	int i = 0;
738 
739 	while (start < fws->fw_count &&
740 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
741 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
742 		start++;
743 		i++;
744 	}
745 
746 	return i;
747 }
748 
/*
 * Allocate DMA memory for all firmware sections (LMAC, UMAC, paging)
 * and record their physical addresses in the context info DRAM table
 * handed to the device.
 *
 * LMAC/UMAC images live in dram->fw and may be released once the
 * firmware reports alive; paging memory lives in dram->paging and is
 * only freed when the device goes down (see iwx_ctxt_info_free_paging).
 *
 * Returns 0 or an errno.  On failure the partially-populated dram->fw
 * array is left for the caller to clean up via iwx_ctxt_info_free_fw_img().
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	/* Image layout: LMAC | separator | UMAC | separator | paging. */
	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the softc lock across the potentially-sleeping allocations. */
	IWX_UNLOCK(sc);
	/*
	 * NOTE(review): M_NOWAIT here is inconsistent with the M_WAITOK
	 * used for dram->paging below, even though the lock has already
	 * been dropped — presumably M_WAITOK was intended; confirm.
	 */
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	/* NOTE(review): dead check — M_WAITOK allocations never return NULL. */
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
						   &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		/* Device-visible table entry: little-endian physical address. */
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
846 
/*
 * Render a firmware version triple as "major.minor.api" into buf.
 * From major version 35 onward the Linux driver prints the minor
 * version as 8-digit hexadecimal; mimic that for consistency.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	if (major >= 35) {
		/* Hexadecimal minor version, zero-padded to 8 digits. */
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
		return;
	}
	snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}
#if 0
/*
 * Allocate a DMA buffer for the firmware monitor (debug logging),
 * trying power-of-two sizes from 1 << max_power down to 1 << min_power.
 * Currently compiled out; kept for future debug-destination support.
 */
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	/* Already allocated. */
	if (fw_mon->size)
		return 0;

	/*
	 * NOTE(review): if max_power < min_power the loop body never runs
	 * and 'err' is read uninitialized below.  Also, 'power' is unsigned,
	 * so min_power == 0 would make 'power >= min_power' always true;
	 * the only caller passes min_power == 11 today.
	 */
	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	/* Report if we had to settle for less than the requested size. */
	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

/*
 * Translate the TLV-provided size exponent into an absolute power-of-two
 * exponent (11..26) and allocate the firmware monitor buffer.
 * A zero max_power selects the maximum (64 MB); otherwise the TLV value
 * is biased by 11 (i.e. units of 2 KB).  Currently compiled out.
 */
static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		     "%s: External buffer size for monitor is too big %d, "
		     "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif
923 
/*
 * Apply the firmware debug destination (IWX_UCODE_TLV_FW_DBG_DEST)
 * register operations and, for external mode, point the device at a
 * host-allocated monitor buffer.
 *
 * The whole body is currently compiled out (#if 0), so this is a no-op
 * that always returns 0.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Replay each register operation from the TLV. */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			/*
			 * NOTE(review): these error returns leave the NIC
			 * lock held (no iwx_nic_unlock on the way out) —
			 * would need fixing if this code is re-enabled.
			 */
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

	/* Reached via goto above, or by falling out of the loop normally. */
monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1010 
1011 static void
iwx_set_ltr(struct iwx_softc * sc)1012 iwx_set_ltr(struct iwx_softc *sc)
1013 {
1014 	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1015 	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1016 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1017 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1018 	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1019 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1020 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1021 	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1022 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1023 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1024 	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1025 
1026 	/*
1027 	 * To workaround hardware latency issues during the boot process,
1028 	 * initialize the LTR to ~250 usec (see ltr_val above).
1029 	 * The firmware initializes this again later (to a smaller value).
1030 	 */
1031 	if (!sc->sc_integrated) {
1032 		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1033 	} else if (sc->sc_integrated &&
1034 		   sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1035 		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1036 		    IWX_HPM_MAC_LRT_ENABLE_ALL);
1037 		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1038 	}
1039 }
1040 
/*
 * Build the (pre-gen3) context info structure in DMA memory, load all
 * firmware sections into DRAM, hand the context info address to the
 * device, and kick the firmware self-load.
 *
 * Returns 0 on success; on failure the firmware image DMA memory is
 * released via iwx_ctxt_info_free_fw_img() before returning an errno.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/*
	 * The RB CB size must fit in its 4-bit field in control_flags.
	 * NOTE(review): the KASSERT message says "rate table size" but the
	 * check is about the RX queue circular-buffer size field width.
	 */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		/* iwx_apply_debug_destination() is currently a no-op stub. */
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1121 
/*
 * Gen3 variant of context info setup: build the PRPH scratch area and
 * the gen3 context info structure in DMA memory, load firmware sections
 * and the image loader (IML) blob, hand their addresses to the device,
 * and kick the firmware self-load.
 *
 * Returns 0 on success; on failure the IML DMA buffer and firmware
 * image memory are released before returning an errno.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 devices cannot boot without an image loader blob. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	/* Fill the PRPH scratch control area (version, flags, RX config). */
	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	/* Size is expressed in dwords. */
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill the gen3 context info: addresses of the PRPH info/scratch
	 * areas and the RX/TX ring index arrays and descriptor rings.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	/* TR/CR tail arrays live at fixed offsets inside the PRPH info page. */
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	/* Copy the image loader blob into its DMA buffer. */
	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* Hand the context info and IML addresses to the device (32+32 bit). */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
		    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1226 
1227 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1228 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1229 {
1230 	struct iwx_self_init_dram *dram = &sc->init_dram;
1231 	int i;
1232 
1233 	if (!dram->fw)
1234 		return;
1235 
1236 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1237 		iwx_dma_contig_free(&dram->fw[i]);
1238 
1239 	free(dram->fw, M_DEVBUF);
1240 	dram->lmac_cnt = 0;
1241 	dram->umac_cnt = 0;
1242 	dram->fw = NULL;
1243 }
1244 
1245 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1246 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1247     const uint8_t *data, size_t dlen)
1248 {
1249 	struct iwx_fw_sects *fws;
1250 	struct iwx_fw_onesect *fwone;
1251 
1252 	if (type >= IWX_UCODE_TYPE_MAX)
1253 		return EINVAL;
1254 	if (dlen < sizeof(uint32_t))
1255 		return EINVAL;
1256 
1257 	fws = &sc->sc_fw.fw_sects[type];
1258 	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1259 	    "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1260 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1261 		return EINVAL;
1262 
1263 	fwone = &fws->fw_sect[fws->fw_count];
1264 
1265 	/* first 32bit are device load offset */
1266 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1267 
1268 	/* rest is data */
1269 	fwone->fws_data = data + sizeof(uint32_t);
1270 	fwone->fws_len = dlen - sizeof(uint32_t);
1271 
1272 	fws->fw_count++;
1273 	fws->fw_totlen += fwone->fws_len;
1274 
1275 	return 0;
1276 }
1277 
1278 #define IWX_DEFAULT_SCAN_CHANNELS	40
1279 /* Newer firmware might support more channels. Raise this value if needed. */
1280 #define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */
1281 
/* Payload layout of an IWX_UCODE_TLV_DEF_CALIB firmware TLV. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;	/* little-endian; index into sc_default_calib */
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1286 
1287 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1288 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1289 {
1290 	const struct iwx_tlv_calib_data *def_calib = data;
1291 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
1292 
1293 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
1294 		return EINVAL;
1295 
1296 	sc->sc_default_calib[ucode_type].flow_trigger =
1297 	    def_calib->calib.flow_trigger;
1298 	sc->sc_default_calib[ucode_type].event_trigger =
1299 	    def_calib->calib.event_trigger;
1300 
1301 	return 0;
1302 }
1303 
1304 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1305 iwx_fw_info_free(struct iwx_fw_info *fw)
1306 {
1307 	free(fw->fw_rawdata, M_DEVBUF);
1308 	fw->fw_rawdata = NULL;
1309 	fw->fw_rawsize = 0;
1310 	/* don't touch fw->fw_status */
1311 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1312 	free(fw->iml, M_DEVBUF);
1313 	fw->iml = NULL;
1314 	fw->iml_len = 0;
1315 }
1316 
1317 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1318 
1319 static int
iwx_read_firmware(struct iwx_softc * sc)1320 iwx_read_firmware(struct iwx_softc *sc)
1321 {
1322 	struct iwx_fw_info *fw = &sc->sc_fw;
1323 	const struct iwx_tlv_ucode_header *uhdr;
1324 	struct iwx_ucode_tlv tlv;
1325 	uint32_t tlv_type;
1326 	const uint8_t *data;
1327 	int err = 0;
1328 	size_t len;
1329 	const struct firmware *fwp;
1330 
1331 	if (fw->fw_status == IWX_FW_STATUS_DONE)
1332 		return 0;
1333 
1334 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1335 	fwp = firmware_get(sc->sc_fwname);
1336 	sc->sc_fwp = fwp;
1337 
1338 	if (fwp == NULL) {
1339 		printf("%s: could not read firmware %s\n",
1340 		    DEVNAME(sc), sc->sc_fwname);
1341 		err = ENOENT;
1342 		goto out;
1343 	}
1344 
1345 	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
1346 		__func__, __LINE__, DEVNAME(sc), sc->sc_fwname);
1347 
1348 
1349 	sc->sc_capaflags = 0;
1350 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1351 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1352 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1353 	sc->n_cmd_versions = 0;
1354 
1355 	uhdr = (const void *)(fwp->data);
1356 	if (*(const uint32_t *)fwp->data != 0
1357 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1358 		printf("%s: invalid firmware %s\n",
1359 		    DEVNAME(sc), sc->sc_fwname);
1360 		err = EINVAL;
1361 		goto out;
1362 	}
1363 
1364 	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1365 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1366 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1367 	    IWX_UCODE_API(le32toh(uhdr->ver)));
1368 
1369 	data = uhdr->data;
1370 	len = fwp->datasize - sizeof(*uhdr);
1371 
1372 	while (len >= sizeof(tlv)) {
1373 		size_t tlv_len;
1374 		const void *tlv_data;
1375 
1376 		memcpy(&tlv, data, sizeof(tlv));
1377 		tlv_len = le32toh(tlv.length);
1378 		tlv_type = le32toh(tlv.type);
1379 
1380 		len -= sizeof(tlv);
1381 		data += sizeof(tlv);
1382 		tlv_data = data;
1383 
1384 		if (len < tlv_len) {
1385 			printf("%s: firmware too short: %zu bytes\n",
1386 			    DEVNAME(sc), len);
1387 			err = EINVAL;
1388 			goto parse_out;
1389 		}
1390 
1391 		switch (tlv_type) {
1392 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1393 			if (tlv_len < sizeof(uint32_t)) {
1394 				err = EINVAL;
1395 				goto parse_out;
1396 			}
1397 			sc->sc_capa_max_probe_len
1398 			    = le32toh(*(const uint32_t *)tlv_data);
1399 			if (sc->sc_capa_max_probe_len >
1400 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1401 				err = EINVAL;
1402 				goto parse_out;
1403 			}
1404 			break;
1405 		case IWX_UCODE_TLV_PAN:
1406 			if (tlv_len) {
1407 				err = EINVAL;
1408 				goto parse_out;
1409 			}
1410 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1411 			break;
1412 		case IWX_UCODE_TLV_FLAGS:
1413 			if (tlv_len < sizeof(uint32_t)) {
1414 				err = EINVAL;
1415 				goto parse_out;
1416 			}
1417 			/*
1418 			 * Apparently there can be many flags, but Linux driver
1419 			 * parses only the first one, and so do we.
1420 			 *
1421 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1422 			 * Intentional or a bug?  Observations from
1423 			 * current firmware file:
1424 			 *  1) TLV_PAN is parsed first
1425 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1426 			 * ==> this resets TLV_PAN to itself... hnnnk
1427 			 */
1428 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
1429 			break;
1430 		case IWX_UCODE_TLV_CSCHEME:
1431 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1432 			if (err)
1433 				goto parse_out;
1434 			break;
1435 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1436 			uint32_t num_cpu;
1437 			if (tlv_len != sizeof(uint32_t)) {
1438 				err = EINVAL;
1439 				goto parse_out;
1440 			}
1441 			num_cpu = le32toh(*(const uint32_t *)tlv_data);
1442 			if (num_cpu < 1 || num_cpu > 2) {
1443 				err = EINVAL;
1444 				goto parse_out;
1445 			}
1446 			break;
1447 		}
1448 		case IWX_UCODE_TLV_SEC_RT:
1449 			err = iwx_firmware_store_section(sc,
1450 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1451 			if (err)
1452 				goto parse_out;
1453 			break;
1454 		case IWX_UCODE_TLV_SEC_INIT:
1455 			err = iwx_firmware_store_section(sc,
1456 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1457 			if (err)
1458 				goto parse_out;
1459 			break;
1460 		case IWX_UCODE_TLV_SEC_WOWLAN:
1461 			err = iwx_firmware_store_section(sc,
1462 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1463 			if (err)
1464 				goto parse_out;
1465 			break;
1466 		case IWX_UCODE_TLV_DEF_CALIB:
1467 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1468 				err = EINVAL;
1469 				goto parse_out;
1470 			}
1471 			err = iwx_set_default_calib(sc, tlv_data);
1472 			if (err)
1473 				goto parse_out;
1474 			break;
1475 		case IWX_UCODE_TLV_PHY_SKU:
1476 			if (tlv_len != sizeof(uint32_t)) {
1477 				err = EINVAL;
1478 				goto parse_out;
1479 			}
1480 			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
1481 			break;
1482 
1483 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1484 			const struct iwx_ucode_api *api;
1485 			int idx, i;
1486 			if (tlv_len != sizeof(*api)) {
1487 				err = EINVAL;
1488 				goto parse_out;
1489 			}
1490 			api = (const struct iwx_ucode_api *)tlv_data;
1491 			idx = le32toh(api->api_index);
1492 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1493 				err = EINVAL;
1494 				goto parse_out;
1495 			}
1496 			for (i = 0; i < 32; i++) {
1497 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1498 					continue;
1499 				setbit(sc->sc_ucode_api, i + (32 * idx));
1500 			}
1501 			break;
1502 		}
1503 
1504 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1505 			const struct iwx_ucode_capa *capa;
1506 			int idx, i;
1507 			if (tlv_len != sizeof(*capa)) {
1508 				err = EINVAL;
1509 				goto parse_out;
1510 			}
1511 			capa = (const struct iwx_ucode_capa *)tlv_data;
1512 			idx = le32toh(capa->api_index);
1513 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1514 				goto parse_out;
1515 			}
1516 			for (i = 0; i < 32; i++) {
1517 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1518 					continue;
1519 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1520 			}
1521 			break;
1522 		}
1523 
1524 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1525 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1526 			/* ignore, not used by current driver */
1527 			break;
1528 
1529 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1530 			err = iwx_firmware_store_section(sc,
1531 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1532 			    tlv_len);
1533 			if (err)
1534 				goto parse_out;
1535 			break;
1536 
1537 		case IWX_UCODE_TLV_PAGING:
1538 			if (tlv_len != sizeof(uint32_t)) {
1539 				err = EINVAL;
1540 				goto parse_out;
1541 			}
1542 			break;
1543 
1544 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1545 			if (tlv_len != sizeof(uint32_t)) {
1546 				err = EINVAL;
1547 				goto parse_out;
1548 			}
1549 			sc->sc_capa_n_scan_channels =
1550 			  le32toh(*(const uint32_t *)tlv_data);
1551 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1552 				err = ERANGE;
1553 				goto parse_out;
1554 			}
1555 			break;
1556 
1557 		case IWX_UCODE_TLV_FW_VERSION:
1558 			if (tlv_len != sizeof(uint32_t) * 3) {
1559 				err = EINVAL;
1560 				goto parse_out;
1561 			}
1562 
1563 			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1564 			    le32toh(((const uint32_t *)tlv_data)[0]),
1565 			    le32toh(((const uint32_t *)tlv_data)[1]),
1566 			    le32toh(((const uint32_t *)tlv_data)[2]));
1567 			break;
1568 
1569 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1570 			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1571 
1572 			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
1573 			if (*fw->dbg_dest_ver != 0) {
1574 				err = EINVAL;
1575 				goto parse_out;
1576 			}
1577 
1578 			if (fw->dbg_dest_tlv_init)
1579 				break;
1580 			fw->dbg_dest_tlv_init = true;
1581 
1582 			dest_v1 = (const void *)tlv_data;
1583 			fw->dbg_dest_tlv_v1 = dest_v1;
1584 			fw->n_dest_reg = tlv_len -
1585 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1586 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1587 			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1588 			    "%s: found debug dest; n_dest_reg=%d\n",
1589 			    __func__, fw->n_dest_reg);
1590 			break;
1591 		}
1592 
1593 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1594 			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;
1595 
1596 			if (!fw->dbg_dest_tlv_init ||
1597 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1598 			    fw->dbg_conf_tlv[conf->id] != NULL)
1599 				break;
1600 
1601 			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1602 			    "Found debug configuration: %d\n", conf->id);
1603 			fw->dbg_conf_tlv[conf->id] = conf;
1604 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1605 			break;
1606 		}
1607 
1608 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1609 			const struct iwx_umac_debug_addrs *dbg_ptrs =
1610 				(const void *)tlv_data;
1611 
1612 			if (tlv_len != sizeof(*dbg_ptrs)) {
1613 				err = EINVAL;
1614 				goto parse_out;
1615 			}
1616 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1617 				break;
1618 			sc->sc_uc.uc_umac_error_event_table =
1619 				le32toh(dbg_ptrs->error_info_addr) &
1620 				~IWX_FW_ADDR_CACHE_CONTROL;
1621 			sc->sc_uc.error_event_table_tlv_status |=
1622 				IWX_ERROR_EVENT_TABLE_UMAC;
1623 			break;
1624 		}
1625 
1626 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1627 			const struct iwx_lmac_debug_addrs *dbg_ptrs =
1628 				(const void *)tlv_data;
1629 
1630 			if (tlv_len != sizeof(*dbg_ptrs)) {
1631 				err = EINVAL;
1632 				goto parse_out;
1633 			}
1634 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1635 				break;
1636 			sc->sc_uc.uc_lmac_error_event_table[0] =
1637 				le32toh(dbg_ptrs->error_event_table_ptr) &
1638 				~IWX_FW_ADDR_CACHE_CONTROL;
1639 			sc->sc_uc.error_event_table_tlv_status |=
1640 				IWX_ERROR_EVENT_TABLE_LMAC1;
1641 			break;
1642 		}
1643 
1644 		case IWX_UCODE_TLV_FW_MEM_SEG:
1645 			break;
1646 
1647 		case IWX_UCODE_TLV_IML:
1648 			if (sc->sc_fw.iml != NULL) {
1649 				free(fw->iml, M_DEVBUF);
1650 				fw->iml_len = 0;
1651 			}
1652 			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
1653 			    M_WAITOK | M_ZERO);
1654 			if (sc->sc_fw.iml == NULL) {
1655 				err = ENOMEM;
1656 				goto parse_out;
1657 			}
1658 			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
1659 			sc->sc_fw.iml_len = tlv_len;
1660 			break;
1661 
1662 		case IWX_UCODE_TLV_CMD_VERSIONS:
1663 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1664 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1665 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1666 			}
1667 			if (sc->n_cmd_versions != 0) {
1668 				err = EINVAL;
1669 				goto parse_out;
1670 			}
1671 			if (tlv_len > sizeof(sc->cmd_versions)) {
1672 				err = EINVAL;
1673 				goto parse_out;
1674 			}
1675 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1676 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1677 			break;
1678 
1679 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1680 			break;
1681 
1682 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1683 		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1684 		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1685 		case IWX_UCODE_TLV_FW_NUM_BEACONS:
1686 			break;
1687 
1688 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1689 		case 58:
1690 		case 0x1000003:
1691 		case 0x1000004:
1692 			break;
1693 
1694 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1695 		case 0x1000000:
1696 		case 0x1000002:
1697 			break;
1698 
1699 		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1700 		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1701 		case IWX_UCODE_TLV_TYPE_HCMD:
1702 		case IWX_UCODE_TLV_TYPE_REGIONS:
1703 		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1704 		case IWX_UCODE_TLV_TYPE_CONF_SET:
1705 		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
1706 		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
1707 		case IWX_UCODE_TLV_CURRENT_PC:
1708 			break;
1709 
1710 		/* undocumented TLV found in iwx-cc-a0-67 image */
1711 		case 0x100000b:
1712 			break;
1713 
1714 		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1715 		case 0x101:
1716 			break;
1717 
1718 		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1719 		case 0x100000c:
1720 			break;
1721 
1722 		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
1723 		case 69:
1724 			break;
1725 
1726 		default:
1727 			err = EINVAL;
1728 			goto parse_out;
1729 		}
1730 
1731 		/*
1732 		 * Check for size_t overflow and ignore missing padding at
1733 		 * end of firmware file.
1734 		 */
1735 		if (roundup(tlv_len, 4) > len)
1736 			break;
1737 
1738 		len -= roundup(tlv_len, 4);
1739 		data += roundup(tlv_len, 4);
1740 	}
1741 
1742 	KASSERT(err == 0, ("unhandled fw parse error"));
1743 
1744 parse_out:
1745 	if (err) {
1746 		printf("%s: firmware parse error %d, "
1747 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1748 	}
1749 
1750 out:
1751 	if (err) {
1752 		fw->fw_status = IWX_FW_STATUS_NONE;
1753 		if (fw->fw_rawdata != NULL)
1754 			iwx_fw_info_free(fw);
1755 	} else
1756 		fw->fw_status = IWX_FW_STATUS_DONE;
1757 	return err;
1758 }
1759 
1760 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1761 iwx_prph_addr_mask(struct iwx_softc *sc)
1762 {
1763 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1764 		return 0x00ffffff;
1765 	else
1766 		return 0x000fffff;
1767 }
1768 
/*
 * Read a periphery (PRPH) register through the indirect HBUS target
 * window.  Callers are expected to hold the NIC access lock; use
 * iwx_read_prph() for the asserting variant.
 */
static uint32_t
iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	/* Select the target register; (3 << 24) enables the access cycle. */
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}
1777 
/*
 * Locked variant of iwx_read_prph_unlocked(); panics (via
 * iwx_nic_assert_locked()) if the NIC access lock is not held.
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}
1784 
/*
 * Write a periphery (PRPH) register through the indirect HBUS target
 * window.  Callers are expected to hold the NIC access lock; use
 * iwx_write_prph() for the asserting variant.
 */
static void
iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	/* Select the target register; (3 << 24) enables the access cycle. */
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}
1793 
/*
 * Locked variant of iwx_write_prph_unlocked(); panics (via
 * iwx_nic_assert_locked()) if the NIC access lock is not held.
 */
static void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}
1800 
/* Read a UMAC PRPH register, applying the device's UMAC address offset. */
static uint32_t
iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
{
	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
}
1806 
/* Write a UMAC PRPH register, applying the device's UMAC address offset. */
static void
iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
}
1812 
1813 static int
iwx_read_mem(struct iwx_softc * sc,uint32_t addr,void * buf,int dwords)1814 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1815 {
1816 	int offs, err = 0;
1817 	uint32_t *vals = buf;
1818 
1819 	if (iwx_nic_lock(sc)) {
1820 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1821 		for (offs = 0; offs < dwords; offs++)
1822 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1823 		iwx_nic_unlock(sc);
1824 	} else {
1825 		err = EBUSY;
1826 	}
1827 	return err;
1828 }
1829 
1830 static int
iwx_poll_bit(struct iwx_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)1831 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1832     int timo)
1833 {
1834 	for (;;) {
1835 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1836 			return 1;
1837 		}
1838 		if (timo < 10) {
1839 			return 0;
1840 		}
1841 		timo -= 10;
1842 		DELAY(10);
1843 	}
1844 }
1845 
/*
 * Acquire the "MAC access" handshake with the device so that PRPH
 * registers and device memory may be accessed.  The lock is reference
 * counted; nested calls only bump the counter.  Returns 1 on success,
 * 0 if the device did not report MAC clock readiness within ~150ms.
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Request the device wake up and keep its clocks running. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Brief settle time before polling for readiness. */
	DELAY(2);

	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1871 
/* Panic if the NIC access lock (see iwx_nic_lock()) is not held. */
static void
iwx_nic_assert_locked(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}
1878 
/*
 * Drop one reference on the NIC access lock; when the count reaches
 * zero, release the device by clearing MAC_ACCESS_REQ.
 */
static void
iwx_nic_unlock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}
1889 
1890 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1891 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1892     uint32_t mask)
1893 {
1894 	uint32_t val;
1895 
1896 	if (iwx_nic_lock(sc)) {
1897 		val = iwx_read_prph(sc, reg) & mask;
1898 		val |= bits;
1899 		iwx_write_prph(sc, reg, val);
1900 		iwx_nic_unlock(sc);
1901 		return 0;
1902 	}
1903 	return EBUSY;
1904 }
1905 
/* Set 'bits' in PRPH register 'reg'.  Returns 0 or EBUSY. */
static int
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}
1911 
/* Clear 'bits' in PRPH register 'reg'.  Returns 0 or EBUSY. */
static int
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1917 
1918 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1919 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1920 {
1921         if (error != 0)
1922                 return;
1923 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1924 	*(bus_addr_t *)arg = segs[0].ds_addr;
1925 }
1926 
1927 static int
iwx_dma_contig_alloc(bus_dma_tag_t tag,struct iwx_dma_info * dma,bus_size_t size,bus_size_t alignment)1928 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1929     bus_size_t size, bus_size_t alignment)
1930 {
1931 	int error;
1932 
1933 	dma->tag = NULL;
1934 	dma->map = NULL;
1935 	dma->size = size;
1936 	dma->vaddr = NULL;
1937 
1938 	error = bus_dma_tag_create(tag, alignment,
1939             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1940             1, size, 0, NULL, NULL, &dma->tag);
1941         if (error != 0)
1942                 goto fail;
1943 
1944         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1945             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1946         if (error != 0)
1947                 goto fail;
1948 
1949         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1950             iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1951         if (error != 0) {
1952 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1953 		dma->vaddr = NULL;
1954 		goto fail;
1955 	}
1956 
1957 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1958 
1959 	return 0;
1960 
1961 fail:
1962 	iwx_dma_contig_free(dma);
1963 	return error;
1964 }
1965 
1966 static void
iwx_dma_contig_free(struct iwx_dma_info * dma)1967 iwx_dma_contig_free(struct iwx_dma_info *dma)
1968 {
1969 	if (dma->vaddr != NULL) {
1970 		bus_dmamap_sync(dma->tag, dma->map,
1971 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1972 		bus_dmamap_unload(dma->tag, dma->map);
1973 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1974 		dma->vaddr = NULL;
1975 	}
1976 	if (dma->tag != NULL) {
1977 		bus_dma_tag_destroy(dma->tag);
1978 		dma->tag = NULL;
1979 	}
1980 }
1981 
1982 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1983 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1984 {
1985 	bus_size_t size;
1986 	int i, err;
1987 
1988 	ring->cur = 0;
1989 
1990 	/* Allocate RX descriptors (256-byte aligned). */
1991 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1992 		size = sizeof(struct iwx_rx_transfer_desc);
1993 	else
1994 		size = sizeof(uint64_t);
1995 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1996 	    size * IWX_RX_MQ_RING_COUNT, 256);
1997 	if (err) {
1998 		device_printf(sc->sc_dev,
1999 		    "could not allocate RX ring DMA memory\n");
2000 		goto fail;
2001 	}
2002 	ring->desc = ring->free_desc_dma.vaddr;
2003 
2004 	/* Allocate RX status area (16-byte aligned). */
2005 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2006 		size = sizeof(uint16_t);
2007 	else
2008 		size = sizeof(*ring->stat);
2009 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2010 	if (err) {
2011 		device_printf(sc->sc_dev,
2012 		    "could not allocate RX status DMA memory\n");
2013 		goto fail;
2014 	}
2015 	ring->stat = ring->stat_dma.vaddr;
2016 
2017 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2018 		size = sizeof(struct iwx_rx_completion_desc);
2019 	else
2020 		size = sizeof(uint32_t);
2021 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2022 	    size * IWX_RX_MQ_RING_COUNT, 256);
2023 	if (err) {
2024 		device_printf(sc->sc_dev,
2025 		    "could not allocate RX ring DMA memory\n");
2026 		goto fail;
2027 	}
2028 
2029 	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2030 	    BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2031 	    0, NULL, NULL, &ring->data_dmat);
2032 
2033 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2034 		struct iwx_rx_data *data = &ring->data[i];
2035 
2036 		memset(data, 0, sizeof(*data));
2037 		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2038 		if (err) {
2039 			device_printf(sc->sc_dev,
2040 			    "could not create RX buf DMA map\n");
2041 			goto fail;
2042 		}
2043 
2044 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2045 		if (err)
2046 			goto fail;
2047 	}
2048 	return 0;
2049 
2050 fail:	iwx_free_rx_ring(sc, ring);
2051 	return err;
2052 }
2053 
/*
 * Stop the RX DMA engine (RFH) and poll up to ~10ms for it to report
 * idle.  AX210+ devices use the GEN3 register layout in UMAC PRPH
 * space.  Silently does nothing if the NIC lock cannot be taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		/* Disable RX buffer DMA. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* Wait for the DMA engine to go idle. */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
2079 
2080 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2081 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2082 {
2083 	ring->cur = 0;
2084 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2085 	    BUS_DMASYNC_PREWRITE);
2086 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2087 		uint16_t *status = sc->rxq.stat_dma.vaddr;
2088 		*status = 0;
2089 	} else
2090 		memset(ring->stat, 0, sizeof(*ring->stat));
2091 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2092 	    BUS_DMASYNC_POSTWRITE);
2093 
2094 }
2095 
2096 static void
iwx_free_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2097 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2098 {
2099 	int i;
2100 
2101 	iwx_dma_contig_free(&ring->free_desc_dma);
2102 	iwx_dma_contig_free(&ring->stat_dma);
2103 	iwx_dma_contig_free(&ring->used_desc_dma);
2104 
2105 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2106 		struct iwx_rx_data *data = &ring->data[i];
2107 		if (data->m != NULL) {
2108 			bus_dmamap_sync(ring->data_dmat, data->map,
2109 			    BUS_DMASYNC_POSTREAD);
2110 			bus_dmamap_unload(ring->data_dmat, data->map);
2111 			m_freem(data->m);
2112 			data->m = NULL;
2113 		}
2114 		if (data->map != NULL) {
2115 			bus_dmamap_destroy(ring->data_dmat, data->map);
2116 			data->map = NULL;
2117 		}
2118 	}
2119 	if (ring->data_dmat != NULL) {
2120 		bus_dma_tag_destroy(ring->data_dmat);
2121 		ring->data_dmat = NULL;
2122 	}
2123 }
2124 
2125 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2126 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2127 {
2128 	bus_addr_t paddr;
2129 	bus_size_t size;
2130 	int i, err;
2131 	size_t bc_tbl_size;
2132 	bus_size_t bc_align;
2133 	size_t mapsize;
2134 
2135 	ring->qid = qid;
2136 	ring->queued = 0;
2137 	ring->cur = 0;
2138 	ring->cur_hw = 0;
2139 	ring->tail = 0;
2140 	ring->tail_hw = 0;
2141 
2142 	/* Allocate TX descriptors (256-byte aligned). */
2143 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2144 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2145 	if (err) {
2146 		device_printf(sc->sc_dev,
2147 		    "could not allocate TX ring DMA memory\n");
2148 		goto fail;
2149 	}
2150 	ring->desc = ring->desc_dma.vaddr;
2151 
2152 	/*
2153 	 * The hardware supports up to 512 Tx rings which is more
2154 	 * than we currently need.
2155 	 *
2156 	 * In DQA mode we use 1 command queue + 1 default queue for
2157 	 * management, control, and non-QoS data frames.
2158 	 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2159 	 *
2160 	 * Tx aggregation requires additional queues, one queue per TID for
2161 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2162 	 * Firmware may assign its own internal IDs for these queues
2163 	 * depending on which TID gets aggregation enabled first.
2164 	 * The driver maintains a table mapping driver-side queue IDs
2165 	 * to firmware-side queue IDs.
2166 	 */
2167 
2168 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2169 		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2170 		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2171 		bc_align = 128;
2172 	} else {
2173 		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2174 		bc_align = 64;
2175 	}
2176 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2177 	    bc_align);
2178 	if (err) {
2179 		device_printf(sc->sc_dev,
2180 		    "could not allocate byte count table DMA memory\n");
2181 		goto fail;
2182 	}
2183 
2184 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2185 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2186 	    IWX_FIRST_TB_SIZE_ALIGN);
2187 	if (err) {
2188 		device_printf(sc->sc_dev,
2189 		    "could not allocate cmd DMA memory\n");
2190 		goto fail;
2191 	}
2192 	ring->cmd = ring->cmd_dma.vaddr;
2193 
2194 	/* FW commands may require more mapped space than packets. */
2195 	if (qid == IWX_DQA_CMD_QUEUE)
2196 		mapsize = (sizeof(struct iwx_cmd_header) +
2197 		    IWX_MAX_CMD_PAYLOAD_SIZE);
2198 	else
2199 		mapsize = MCLBYTES;
2200 	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2201 	    BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2202 	    mapsize, 0, NULL, NULL, &ring->data_dmat);
2203 
2204 	paddr = ring->cmd_dma.paddr;
2205 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2206 		struct iwx_tx_data *data = &ring->data[i];
2207 
2208 		data->cmd_paddr = paddr;
2209 		paddr += sizeof(struct iwx_device_cmd);
2210 
2211 		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2212 		if (err) {
2213 			device_printf(sc->sc_dev,
2214 			    "could not create TX buf DMA map\n");
2215 			goto fail;
2216 		}
2217 	}
2218 	KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2219 	return 0;
2220 
2221 fail:
2222 	return err;
2223 }
2224 
/*
 * Reset a TX ring to its empty state: free any in-flight mbufs, clear
 * the byte-count table and descriptors, rewind all cursors, and drop
 * the ring from the full/enabled queue masks and the TID->queue
 * aggregation map.  DMA memory itself is kept allocated.
 */
static void
iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	/* Clear byte count table. */
	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);

	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	sc->qenablemsk &= ~(1 << ring->qid);
	/* Remove any TID mapping pointing at this queue. */
	for (i = 0; i < nitems(sc->aggqid); i++) {
		if (sc->aggqid[i] == ring->qid) {
			sc->aggqid[i] = 0;
			break;
		}
	}
	ring->queued = 0;
	ring->cur = 0;
	ring->cur_hw = 0;
	ring->tail = 0;
	ring->tail_hw = 0;
	ring->tid = 0;
}
2264 
2265 static void
iwx_free_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2266 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2267 {
2268 	int i;
2269 
2270 	iwx_dma_contig_free(&ring->desc_dma);
2271 	iwx_dma_contig_free(&ring->cmd_dma);
2272 	iwx_dma_contig_free(&ring->bc_tbl);
2273 
2274 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2275 		struct iwx_tx_data *data = &ring->data[i];
2276 
2277 		if (data->m != NULL) {
2278 			bus_dmamap_sync(ring->data_dmat, data->map,
2279 			    BUS_DMASYNC_POSTWRITE);
2280 			bus_dmamap_unload(ring->data_dmat, data->map);
2281 			m_freem(data->m);
2282 			data->m = NULL;
2283 		}
2284 		if (data->map != NULL) {
2285 			bus_dmamap_destroy(ring->data_dmat, data->map);
2286 			data->map = NULL;
2287 		}
2288 	}
2289 	if (ring->data_dmat != NULL) {
2290 		bus_dma_tag_destroy(ring->data_dmat);
2291 		ring->data_dmat = NULL;
2292 	}
2293 }
2294 
/*
 * Unmask only the RF-kill interrupt cause (MSI or MSI-X path) and
 * allow the RF-kill signal to wake the PCIe link.
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* MSI-X: mask all FH causes, unmask only HW RF-kill. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2312 
2313 static int
iwx_check_rfkill(struct iwx_softc * sc)2314 iwx_check_rfkill(struct iwx_softc *sc)
2315 {
2316 	uint32_t v;
2317 	int rv;
2318 
2319 	/*
2320 	 * "documentation" is not really helpful here:
2321 	 *  27:	HW_RF_KILL_SW
2322 	 *	Indicates state of (platform's) hardware RF-Kill switch
2323 	 *
2324 	 * But apparently when it's off, it's on ...
2325 	 */
2326 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2327 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2328 	if (rv) {
2329 		sc->sc_flags |= IWX_FLAG_RFKILL;
2330 	} else {
2331 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2332 	}
2333 
2334 	return rv;
2335 }
2336 
/*
 * Unmask the full default set of interrupt causes, for either the
 * legacy MSI path or the MSI-X path.
 */
static void
iwx_enable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
2356 
/*
 * Unmask only the interrupts needed during firmware load: the ALIVE
 * notification (plus FH RX on the legacy MSI path).
 */
static void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
2376 
#if 0
/* Currently unused: re-apply the interrupt mask saved in sc_intmask. */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2384 
/*
 * Mask all interrupt causes.  On the legacy MSI path this also
 * acknowledges any pending interrupts; on MSI-X it restores the
 * initial (all-masked) cause masks.
 */
static void
iwx_disable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWX_WRITE(sc, IWX_CSR_INT, ~0);
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}
}
2401 
/*
 * Reset the in-memory interrupt cause table (ICT): clear its DRAM
 * buffer, point the device at it, switch the driver into ICT mode,
 * and re-enable interrupts.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Acknowledge anything pending, then unmask. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2423 
#define IWX_HW_READY_TIMEOUT 50
/*
 * Signal NIC_READY and poll briefly for the device to acknowledge it.
 * On success, also tell the device the OS is alive.  Returns nonzero
 * when the device reported ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2445 
/*
 * Bring the card out of low-power state so the driver can talk to it.
 * Returns 0 once iwx_set_hw_ready() succeeds, ETIMEDOUT otherwise.
 *
 * NOTE(review): 't' is not reset between outer retries, so the inner
 * wait only spends its full 150ms budget on the first pass; subsequent
 * passes poll once.  This mirrors the OpenBSD code — confirm before
 * changing.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2475 
/*
 * Cycle power-gating in the HPM unit: force the block active, enable
 * power/sleep gating, then release the force.  Each step is a PRPH
 * read-modify-write and may fail with EBUSY if the NIC lock cannot be
 * taken; the 20us delays allow each setting to take effect.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2496 
/*
 * Configure PCIe power-saving behavior: disable ASPM L0s (unstable on
 * these devices) and record whether the platform left L0s off
 * (sc_pm_support) and whether LTR is enabled (sc_ltr_enabled) from the
 * PCIe Link Control and Device Control 2 registers.
 */
static void
iwx_apm_config(struct iwx_softc *sc)
{
	uint16_t lctl, cap;
	int pcie_ptr;
	int error;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);

	/* Locate the PCIe capability to read link/device control regs. */
	error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
	if (error != 0) {
		printf("can't fill pcie_ptr\n");
		return;
	}

	lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
	    sizeof(lctl));
#define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
	/* PM is "supported" only if the platform did not enable ASPM L0s. */
	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
#define PCI_PCIE_DCSR2 0x28
	cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
	    sizeof(lctl));
#define PCI_PCIE_DCSR2_LTREN 0x00000400
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
#define PCI_PCIE_LCSR_ASPM_L1 0x00000002
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
#undef PCI_PCIE_LCSR_ASPM_L0S
#undef PCI_PCIE_DCSR2
#undef PCI_PCIE_DCSR2_LTREN
#undef PCI_PCIE_LCSR_ASPM_L1
}
2536 
/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE:  This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success or ETIMEDOUT if the MAC clock never stabilized.
 */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2590 
/*
 * Power down the adapter: pulse the PREPARE/ENABLE_PME handshake,
 * stop busmaster DMA, and move the device back to the uninitialized
 * (D0U*) power state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2619 
/*
 * Program the MSI-X hardware configuration and capture the initial
 * (hardware-default) FH and HW cause masks for later mask/unmask
 * operations.  No-op beyond iwx_conf_msix_hw() when MSI-X is not in use.
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* Reading the mask registers yields the default-enabled causes. */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2633 
/*
 * Configure hardware interrupt-cause routing.  Tells the firmware whether
 * MSI or MSI-X is in use, then maps every cause (both RX queues and all
 * non-RX causes) to a single vector (0) and unmasks them.  When 'stopped'
 * is set the UREG_CHICK PRPH write is skipped; the CSR IVAR/mask registers
 * are still reprogrammed because a reset erases the IVAR table (see the
 * comment in iwx_stop_device()).
 */
static void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;	/* this driver uses a single MSI-X vector */

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
		    IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2721 
/*
 * Clear the HPM persistence bit on 22000-family devices so the device can
 * be reset cleanly.  Returns 0 on success, EPERM if the WFPM write-protect
 * bit prevents clearing it.
 */
static int
iwx_clear_persistence_bit(struct iwx_softc *sc)
{
	uint32_t hpm, wprot;

	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
	/*
	 * 0xa5a5a5a0 — presumably the pattern returned when the register
	 * is unreadable/unavailable; NOTE(review): confirm against the
	 * Linux iwlwifi equivalent.
	 */
	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
		if (wprot & IWX_PREG_WFPM_ACCESS) {
			printf("%s: cannot clear persistence bit\n",
			    DEVNAME(sc));
			return EPERM;
		}
		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
		    hpm & ~IWX_PERSISTENCE_BIT);
	}

	return 0;
}
2741 
/*
 * Bring the hardware out of reset: prepare the card, clear the 22000
 * persistence bit, software-reset the device (twice for integrated 22000
 * parts, with forced power gating in between), initialize APM and MSI-X,
 * and arm the RF-kill interrupt.  Returns 0 or an errno.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		/* Request init-done and wait for the MAC clock to settle. */
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	/* Keep RF-kill notifications working even before full bring-up. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2794 
/*
 * Full device shutdown: quiesce interrupts and DMA, reset all RX/TX rings,
 * power down via APM, software-reset the on-board processor, and then
 * reprogram the MSI-X IVAR table so RF-kill interrupts keep working.
 */
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging and PNVM DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2855 
2856 static void
iwx_nic_config(struct iwx_softc * sc)2857 iwx_nic_config(struct iwx_softc *sc)
2858 {
2859 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2860 	uint32_t mask, val, reg_val = 0;
2861 
2862 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2863 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2864 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2865 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2866 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2867 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2868 
2869 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2870 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2871 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2872 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2873 
2874 	/* radio configuration */
2875 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2876 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2877 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2878 
2879 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2880 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2881 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2882 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2883 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2884 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2885 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2886 
2887 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2888 	val &= ~mask;
2889 	val |= reg_val;
2890 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2891 }
2892 
/*
 * Minimal RX-side NIC setup: only the interrupt-coalescing timer is
 * programmed here.  Always returns 0.
 */
static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2904 
2905 static int
iwx_nic_init(struct iwx_softc * sc)2906 iwx_nic_init(struct iwx_softc *sc)
2907 {
2908 	int err;
2909 
2910 	iwx_apm_init(sc);
2911 	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2912 		iwx_nic_config(sc);
2913 
2914 	err = iwx_nic_rx_init(sc);
2915 	if (err)
2916 		return err;
2917 
2918 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2919 
2920 	return 0;
2921 }
2922 
/*
 * Map ieee80211_edca_ac categories to firmware Tx FIFO.
 * Index order must match the EDCA access-category numbering
 * (BE, BK, VI, VO).
 */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2930 
/*
 * Enable a Tx queue in firmware for the given station/TID, using either
 * the legacy SCD_QUEUE_CFG command (version 0/unknown) or version 3 of
 * the wide SCD_QUEUE_CONFIG_CMD.  The firmware's response must confirm
 * both the queue id we requested and a write pointer matching our ring
 * state.  Returns 0 or an errno.
 */
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: pass ring DMA addresses directly. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 command: "add queue" operation with a station mask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must agree with our ring position. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3023 
/*
 * Disable a firmware Tx queue (counterpart of iwx_enable_txq()), using
 * the command version negotiated with the firmware, then clear the
 * queue-enabled bit and reset the local ring.  Returns 0 or an errno.
 */
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 command: explicit "remove queue" operation. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3082 
3083 static void
iwx_post_alive(struct iwx_softc * sc)3084 iwx_post_alive(struct iwx_softc *sc)
3085 {
3086 	int txcmd_ver;
3087 
3088 	iwx_ict_reset(sc);
3089 
3090 	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3091 	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3092 		sc->sc_rate_n_flags_version = 2;
3093 	else
3094 		sc->sc_rate_n_flags_version = 1;
3095 
3096 	txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3097 }
3098 
3099 static int
iwx_schedule_session_protection(struct iwx_softc * sc,struct iwx_node * in,uint32_t duration_tu)3100 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
3101     uint32_t duration_tu)
3102 {
3103 
3104 	struct iwx_session_prot_cmd cmd = {
3105 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3106 		    in->in_color)),
3107 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
3108 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3109 		.duration_tu = htole32(duration_tu),
3110 	};
3111 	uint32_t cmd_id;
3112 	int err;
3113 
3114 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3115 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3116 	if (!err)
3117 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3118 	return err;
3119 }
3120 
3121 static void
iwx_unprotect_session(struct iwx_softc * sc,struct iwx_node * in)3122 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3123 {
3124 	struct iwx_session_prot_cmd cmd = {
3125 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3126 		    in->in_color)),
3127 		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3128 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3129 		.duration_tu = 0,
3130 	};
3131 	uint32_t cmd_id;
3132 
3133 	/* Do nothing if the time event has already ended. */
3134 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3135 		return;
3136 
3137 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3138 	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3139 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3140 }
3141 
3142 /*
3143  * NVM read access and content parsing.  We do not support
3144  * external NVM or writing NVM.
3145  */
3146 
3147 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3148 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3149 {
3150 	uint8_t tx_ant;
3151 
3152 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3153 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3154 
3155 	if (sc->sc_nvm.valid_tx_ant)
3156 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3157 
3158 	return tx_ant;
3159 }
3160 
3161 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3162 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3163 {
3164 	uint8_t rx_ant;
3165 
3166 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3167 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3168 
3169 	if (sc->sc_nvm.valid_rx_ant)
3170 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3171 
3172 	return rx_ant;
3173 }
3174 
3175 static void
iwx_init_channel_map(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])3176 iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
3177     struct ieee80211_channel chans[])
3178 {
3179 	struct iwx_softc *sc = ic->ic_softc;
3180 	struct iwx_nvm_data *data = &sc->sc_nvm;
3181 	uint8_t bands[IEEE80211_MODE_BYTES];
3182 	const uint8_t *nvm_channels;
3183 	uint32_t ch_flags;
3184 	int ch_idx, nchan;
3185 
3186 	if (sc->sc_uhb_supported) {
3187 		nchan = nitems(iwx_nvm_channels_uhb);
3188 		nvm_channels = iwx_nvm_channels_uhb;
3189 	} else {
3190 		nchan = nitems(iwx_nvm_channels_8000);
3191 		nvm_channels = iwx_nvm_channels_8000;
3192 	}
3193 
3194 	/* 2.4Ghz; 1-13: 11b/g channels. */
3195 	if (!data->sku_cap_band_24GHz_enable)
3196 		goto band_5;
3197 
3198 	memset(bands, 0, sizeof(bands));
3199 	setbit(bands, IEEE80211_MODE_11B);
3200 	setbit(bands, IEEE80211_MODE_11G);
3201 	setbit(bands, IEEE80211_MODE_11NG);
3202 	for (ch_idx = 0;
3203 	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
3204 	    ch_idx++) {
3205 
3206 		uint32_t nflags = 0;
3207 		int cflags = 0;
3208 
3209 		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
3210 			ch_flags = le32_to_cpup(
3211 			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3212 		} else {
3213 			ch_flags = le16_to_cpup(
3214 			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3215 		}
3216 		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3217 			continue;
3218 
3219 	          if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3220                   cflags |= NET80211_CBW_FLAG_HT40;
3221 
3222 		/* XXX-BZ nflags RADAR/DFS/INDOOR */
3223 
3224 		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3225 		nvm_channels[ch_idx],
3226 		ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
3227 		/* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3228 		nflags, bands, cflags);
3229 	}
3230 
3231 band_5:
3232 	/* 5Ghz */
3233 	if (!data->sku_cap_band_52GHz_enable)
3234 		goto band_6;
3235 
3236 
3237 	memset(bands, 0, sizeof(bands));
3238 	setbit(bands, IEEE80211_MODE_11A);
3239 	setbit(bands, IEEE80211_MODE_11NA);
3240 	setbit(bands, IEEE80211_MODE_VHT_5GHZ);
3241 
3242 	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
3243 	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
3244 	    ch_idx++) {
3245 		uint32_t nflags = 0;
3246 		int cflags = 0;
3247 
3248 		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
3249 			ch_flags = le32_to_cpup(
3250 			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3251 		else
3252 			ch_flags = le16_to_cpup(
3253 			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3254 
3255 		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3256 		continue;
3257 
3258 		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3259 			cflags |= NET80211_CBW_FLAG_HT40;
3260 		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
3261 			cflags |= NET80211_CBW_FLAG_VHT80;
3262 		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
3263 			cflags |= NET80211_CBW_FLAG_VHT160;
3264 
3265 		/* XXX-BZ nflags RADAR/DFS/INDOOR */
3266 
3267 		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3268 		nvm_channels[ch_idx],
3269 		ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
3270 		/* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3271 		nflags, bands, cflags);
3272 	}
3273 band_6:
3274 	/* 6GHz one day ... */
3275 	return;
3276 }
3277 
3278 static int
iwx_mimo_enabled(struct iwx_softc * sc)3279 iwx_mimo_enabled(struct iwx_softc *sc)
3280 {
3281 
3282 	return !sc->sc_nvm.sku_cap_mimo_disable;
3283 }
3284 
3285 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3286 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3287     uint16_t ssn, uint16_t buf_size)
3288 {
3289 	reorder_buf->head_sn = ssn;
3290 	reorder_buf->num_stored = 0;
3291 	reorder_buf->buf_size = buf_size;
3292 	reorder_buf->last_amsdu = 0;
3293 	reorder_buf->last_sub_index = 0;
3294 	reorder_buf->removed = 0;
3295 	reorder_buf->valid = 0;
3296 	reorder_buf->consec_oldsn_drops = 0;
3297 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3298 	reorder_buf->consec_oldsn_prev_drop = 0;
3299 }
3300 
3301 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3302 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3303 {
3304 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3305 
3306 	reorder_buf->removed = 1;
3307 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3308 }
3309 
3310 #define IWX_MAX_RX_BA_SESSIONS 16
3311 
3312 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3313 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3314 {
3315 	int i;
3316 
3317 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3318 		if (sc->sc_rxba_data[i].baid ==
3319 		    IWX_RX_REORDER_DATA_INVALID_BAID)
3320 			continue;
3321 		if (sc->sc_rxba_data[i].tid == tid)
3322 			return &sc->sc_rxba_data[i];
3323 	}
3324 
3325 	return NULL;
3326 }
3327 
/*
 * Allocate or remove a firmware BAID (block-ack id) for an RX BA session
 * via the RX_BAID_ALLOCATION_CONFIG command.  On start the firmware
 * returns the new BAID through the command status; on stop the BAID of
 * the existing session is looked up and reported back via *baid.
 * Returns 0 or an errno (ENOENT if stopping a nonexistent session,
 * ERANGE if firmware returns a BAID beyond our table).
 */
static int
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* Command version 1 removes by BAID, later by sta/tid. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* Firmware-assigned BAID must fit our rxba table. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3378 
/*
 * Start or stop an RX block-ack (aggregation) session for the given TID.
 * Configures the firmware BAID, then initializes or clears the matching
 * local reorder-buffer slot and updates the session count.  Errors are
 * logged via DPRINTF and otherwise silently ignored (best-effort).
 */
static void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	/* Enforce the firmware's limit on simultaneous RX BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		/* Only BAID-capable firmware is supported by this driver. */
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		/* Slot already in use: firmware/driver state mismatch. */
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		if (timeout_val != 0) {
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3429 
/*
 * Start a TX aggregation (BA) session for the given TID by enabling a
 * dedicated firmware Tx queue; the firmware itself handles the BA
 * session negotiation and teardown.
 */
static void
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid)
{
	int err, qid;

	qid = sc->aggqid[tid];
	if (qid == 0) {
		/*
		 * Firmware should pick the next unused Tx queue.
		 * fls() of the enable mask yields the first unused queue
		 * id, assuming queues are enabled contiguously from 0 —
		 * NOTE(review): confirm that assumption holds.
		 */
		qid = fls(sc->qenablemsk);
	}

	DPRINTF(("%s: qid=%i\n", __func__, qid));

	/*
	 * Simply enable the queue.
	 * Firmware handles Tx Ba session setup and teardown.
	 */
	if ((sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwx_nic_lock(sc)) {
			return;
		}
		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
		    IWX_TX_RING_COUNT);
		iwx_nic_unlock(sc);
		if (err) {
			printf("%s: could not enable Tx queue %d "
			    "(error %d)\n", DEVNAME(sc), qid, err);
			return;
		}
	}
	/*
	 * NOTE(review): plain assignment clobbers any other txa_flags
	 * bits; verify '|=' is not intended here.
	 */
	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
	sc->aggqid[tid] = qid;
}
3465 
/*
 * Deferred task that services pending RX block-ack start/stop requests
 * recorded in sc->ba_rx.{start,stop}_tidmask by the interrupt path.
 * Runs with the softc lock held; bails out early on shutdown.
 */
static void
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				/*
				 * NOTE(review): 'break' abandons the
				 * remaining TIDs; verify 'continue' was
				 * not intended.
				 */
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3502 
/*
 * Deferred task that services pending TX block-ack start requests
 * recorded in sc->ba_tx.start_tidmask.  Runs with the softc lock held;
 * bails out early on shutdown.
 */
static void
iwx_ba_tx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_tx.start_tidmask & (1 << tid)) {
			DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
			    tid));
			iwx_sta_tx_agg_start(sc, ni, tid);
			sc->ba_tx.start_tidmask &= ~(1 << tid);
			sc->sc_flags |= IWX_FLAG_AMPDUTX;
		}
	}

	IWX_UNLOCK(sc);
}
3527 
/*
 * Read the MAC address from the device's CSR registers into
 * data->hw_addr.  The OEM "strap" registers are tried first; if they do
 * not hold a valid address, fall back to the OTP registers.  Leaves
 * hw_addr zeroed if the NIC cannot be locked.
 */
static void
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	uint32_t mac_addr0, mac_addr1;

	memset(data->hw_addr, 0, sizeof(data->hw_addr));

	if (!iwx_nic_lock(sc))
		return;

	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	/* If OEM fused a valid address, use it instead of the one in OTP. */
	if (iwx_is_valid_mac_addr(data->hw_addr)) {
		iwx_nic_unlock(sc);
		return;
	}

	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	iwx_nic_unlock(sc);
}
3556 
3557 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3558 iwx_is_valid_mac_addr(const uint8_t *addr)
3559 {
3560 	static const uint8_t reserved_mac[] = {
3561 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3562 	};
3563 
3564 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3565 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3566 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3567 	    !ETHER_IS_MULTICAST(addr));
3568 }
3569 
/*
 * Assemble a 6-byte MAC address from the two CSR MAC-address words.
 * The bytes of each word are stored reversed, so copy them out
 * byte-wise (memcpy preserves the original cast-to-bytes semantics on
 * either endianness) and flip their order into 'dest'.
 */
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	uint8_t b[sizeof(uint32_t)];

	memcpy(b, &mac_addr0, sizeof(mac_addr0));
	dest[0] = b[3];
	dest[1] = b[2];
	dest[2] = b[1];
	dest[3] = b[0];

	memcpy(b, &mac_addr1, sizeof(mac_addr1));
	dest[4] = b[1];
	dest[5] = b[0];
}
3585 
3586 static int
iwx_nvm_get(struct iwx_softc * sc)3587 iwx_nvm_get(struct iwx_softc *sc)
3588 {
3589 	struct iwx_nvm_get_info cmd = {};
3590 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3591 	struct iwx_host_cmd hcmd = {
3592 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3593 		.data = { &cmd, },
3594 		.len = { sizeof(cmd) },
3595 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3596 		    IWX_NVM_GET_INFO)
3597 	};
3598 	int err = 0;
3599 	uint32_t mac_flags;
3600 	/*
3601 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3602 	 * in v3, except for the channel profile part of the
3603 	 * regulatory.  So we can just access the new struct, with the
3604 	 * exception of the latter.
3605 	 */
3606 	struct iwx_nvm_get_info_rsp *rsp;
3607 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3608 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3609 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3610 
3611 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3612 	err = iwx_send_cmd(sc, &hcmd);
3613 	if (err) {
3614 		printf("%s: failed to send cmd (error %d)", __func__, err);
3615 		return err;
3616 	}
3617 
3618 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3619 		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
3620 		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
3621 		printf("%s: resp_len=%zu\n", __func__, resp_len);
3622 		err = EIO;
3623 		goto out;
3624 	}
3625 
3626 	memset(nvm, 0, sizeof(*nvm));
3627 
3628 	iwx_set_mac_addr_from_csr(sc, nvm);
3629 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3630 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3631 		err = EINVAL;
3632 		goto out;
3633 	}
3634 
3635 	rsp = (void *)hcmd.resp_pkt->data;
3636 
3637 	/* Initialize general data */
3638 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3639 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3640 
3641 	/* Initialize MAC sku data */
3642 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3643 	nvm->sku_cap_11ac_enable =
3644 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3645 	nvm->sku_cap_11n_enable =
3646 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3647 	nvm->sku_cap_11ax_enable =
3648 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3649 	nvm->sku_cap_band_24GHz_enable =
3650 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3651 	nvm->sku_cap_band_52GHz_enable =
3652 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3653 	nvm->sku_cap_mimo_disable =
3654 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3655 
3656 	/* Initialize PHY sku data */
3657 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3658 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3659 
3660 	if (le32toh(rsp->regulatory.lar_enabled) &&
3661 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3662 		nvm->lar_enabled = 1;
3663 	}
3664 
3665 	memcpy(&sc->sc_rsp_info, rsp, resp_len);
3666 	if (v4) {
3667 		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
3668 	} else {
3669 		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
3670 	}
3671 out:
3672 	iwx_free_resp(sc, &hcmd);
3673 	return err;
3674 }
3675 
/*
 * Upload firmware sections to the device via the context-info
 * mechanism and wait for the firmware "alive" notification
 * (posted by the interrupt path as sc_uc.uc_ok/uc_intr).
 * Returns 0 on success, an errno on setup failure or timeout.
 */
static int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	/* AX210 and later use the "gen3" context-info layout. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* Image-loader DMA buffers are no longer needed after load. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3712 
/*
 * Prepare the NIC for firmware load: acknowledge pending interrupts,
 * clear the rfkill handshake bits, initialize the NIC, enable the
 * firmware-load interrupt and hand off to iwx_load_firmware().
 * Returns 0 on success or an errno value.
 */
static int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Acknowledge all pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3740 
3741 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3742 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3743     size_t len)
3744 {
3745 	const struct iwx_ucode_tlv *tlv;
3746 	uint32_t sha1 = 0;
3747 	uint16_t mac_type = 0, rf_id = 0;
3748 	uint8_t *pnvm_data = NULL, *tmp;
3749 	int hw_match = 0;
3750 	uint32_t size = 0;
3751 	int err;
3752 
3753 	while (len >= sizeof(*tlv)) {
3754 		uint32_t tlv_len, tlv_type;
3755 
3756 		len -= sizeof(*tlv);
3757 		tlv = (const void *)data;
3758 
3759 		tlv_len = le32toh(tlv->length);
3760 		tlv_type = le32toh(tlv->type);
3761 
3762 		if (len < tlv_len) {
3763 			printf("%s: invalid TLV len: %zd/%u\n",
3764 			    DEVNAME(sc), len, tlv_len);
3765 			err = EINVAL;
3766 			goto out;
3767 		}
3768 
3769 		data += sizeof(*tlv);
3770 
3771 		switch (tlv_type) {
3772 		case IWX_UCODE_TLV_PNVM_VERSION:
3773 			if (tlv_len < sizeof(uint32_t))
3774 				break;
3775 
3776 			sha1 = le32_to_cpup((const uint32_t *)data);
3777 			break;
3778 		case IWX_UCODE_TLV_HW_TYPE:
3779 			if (tlv_len < 2 * sizeof(uint16_t))
3780 				break;
3781 
3782 			if (hw_match)
3783 				break;
3784 
3785 			mac_type = le16_to_cpup((const uint16_t *)data);
3786 			rf_id = le16_to_cpup((const uint16_t *)(data +
3787 			    sizeof(uint16_t)));
3788 
3789 			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3790 			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3791 				hw_match = 1;
3792 			break;
3793 		case IWX_UCODE_TLV_SEC_RT: {
3794 			const struct iwx_pnvm_section *section;
3795 			uint32_t data_len;
3796 
3797 			section = (const void *)data;
3798 			data_len = tlv_len - sizeof(*section);
3799 
3800 			/* TODO: remove, this is a deprecated separator */
3801 			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3802 				break;
3803 
3804 			tmp = malloc(size + data_len, M_DEVBUF,
3805 			    M_WAITOK | M_ZERO);
3806 			if (tmp == NULL) {
3807 				err = ENOMEM;
3808 				goto out;
3809 			}
3810 			// XXX:misha pnvm_data is NULL and size is 0 at first pass
3811 			memcpy(tmp, pnvm_data, size);
3812 			memcpy(tmp + size, section->data, data_len);
3813 			free(pnvm_data, M_DEVBUF);
3814 			pnvm_data = tmp;
3815 			size += data_len;
3816 			break;
3817 		}
3818 		case IWX_UCODE_TLV_PNVM_SKU:
3819 			/* New PNVM section started, stop parsing. */
3820 			goto done;
3821 		default:
3822 			break;
3823 		}
3824 
3825 		if (roundup(tlv_len, 4) > len)
3826 			break;
3827 		len -= roundup(tlv_len, 4);
3828 		data += roundup(tlv_len, 4);
3829 	}
3830 done:
3831 	if (!hw_match || size == 0) {
3832 		err = ENOENT;
3833 		goto out;
3834 	}
3835 
3836 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3837 	if (err) {
3838 		printf("%s: could not allocate DMA memory for PNVM\n",
3839 		    DEVNAME(sc));
3840 		err = ENOMEM;
3841 		goto out;
3842 	}
3843 	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3844 	iwx_ctxt_info_gen3_set_pnvm(sc);
3845 	sc->sc_pnvm_ver = sha1;
3846 out:
3847 	free(pnvm_data, M_DEVBUF);
3848 	return err;
3849 }
3850 
/*
 * Scan a PNVM image for an IWX_UCODE_TLV_PNVM_SKU section whose
 * three-word SKU ID matches this device, then hand the TLVs that
 * follow it to iwx_pnvm_handle_section().  Returns 0 on success,
 * EINVAL on a malformed TLV, ENOENT if no matching SKU was found.
 */
static int
iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
{
	const struct iwx_ucode_tlv *tlv;

	while (len >= sizeof(*tlv)) {
		uint32_t tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);

		/* TLV payloads are padded to 4-byte alignment. */
		if (len < tlv_len || roundup(tlv_len, 4) > len)
			return EINVAL;

		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
			const struct iwx_sku_id *sku_id =
				(const void *)(data + sizeof(*tlv));

			/* Advance past the SKU TLV before matching. */
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);

			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
			    iwx_pnvm_handle_section(sc, data, len) == 0)
				return 0;
		} else {
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);
		}
	}

	return ENOENT;
}
3888 
3889 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3890 static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc * sc)3891 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3892 {
3893 	struct iwx_prph_scratch *prph_scratch;
3894 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3895 
3896 	prph_scratch = sc->prph_scratch_dma.vaddr;
3897 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3898 
3899 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3900 	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3901 
3902 	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3903 }
3904 
3905 /*
3906  * Load platform-NVM (non-volatile-memory) data from the filesystem.
3907  * This data apparently contains regulatory information and affects device
3908  * channel configuration.
3909  * The SKU of AX210 devices tells us which PNVM file section is needed.
3910  * Pre-AX210 devices store NVM data onboard.
3911  */
3912 static int
iwx_load_pnvm(struct iwx_softc * sc)3913 iwx_load_pnvm(struct iwx_softc *sc)
3914 {
3915 	const int wait_flags = IWX_PNVM_COMPLETE;
3916 	int err = 0;
3917 	const struct firmware *pnvm;
3918 
3919 	if (sc->sc_sku_id[0] == 0 &&
3920 	    sc->sc_sku_id[1] == 0 &&
3921 	    sc->sc_sku_id[2] == 0)
3922 		return 0;
3923 
3924 	if (sc->sc_pnvm_name) {
3925 		if (sc->pnvm_dma.vaddr == NULL) {
3926 			IWX_UNLOCK(sc);
3927 			pnvm = firmware_get(sc->sc_pnvm_name);
3928 			if (pnvm == NULL) {
3929 				printf("%s: could not read %s (error %d)\n",
3930 				    DEVNAME(sc), sc->sc_pnvm_name, err);
3931 				IWX_LOCK(sc);
3932 				return EINVAL;
3933 			}
3934 			sc->sc_pnvm = pnvm;
3935 
3936 			err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3937 			IWX_LOCK(sc);
3938 			if (err && err != ENOENT) {
3939 				return EINVAL;
3940 			}
3941 		} else
3942 			iwx_ctxt_info_gen3_set_pnvm(sc);
3943 	}
3944 
3945 	if (!iwx_nic_lock(sc)) {
3946 		return EBUSY;
3947 	}
3948 
3949 	/*
3950 	 * If we don't have a platform NVM file simply ask firmware
3951 	 * to proceed without it.
3952 	 */
3953 
3954 	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3955 	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3956 
3957 	/* Wait for the pnvm complete notification from firmware. */
3958 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3959 		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3960 		if (err)
3961 			break;
3962 	}
3963 
3964 	iwx_nic_unlock(sc);
3965 
3966 	return err;
3967 }
3968 
3969 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)3970 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3971 {
3972 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3973 		.valid = htole32(valid_tx_ant),
3974 	};
3975 
3976 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3977 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3978 }
3979 
3980 static int
iwx_send_phy_cfg_cmd(struct iwx_softc * sc)3981 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3982 {
3983 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3984 
3985 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3986 	phy_cfg_cmd.calib_control.event_trigger =
3987 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3988 	phy_cfg_cmd.calib_control.flow_trigger =
3989 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3990 
3991 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3992 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3993 }
3994 
3995 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)3996 iwx_send_dqa_cmd(struct iwx_softc *sc)
3997 {
3998 	struct iwx_dqa_enable_cmd dqa_cmd = {
3999 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4000 	};
4001 	uint32_t cmd_id;
4002 
4003 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4004 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4005 }
4006 
/*
 * Read the firmware image from the filesystem, start it on the
 * device and, on AX210 and later, also load the PNVM image.
 * The softc lock is dropped around iwx_read_firmware() because
 * fetching the image may sleep.  Returns 0 on success or an errno.
 */
static int
iwx_load_ucode_wait_alive(struct iwx_softc *sc)
{
	int err;

	IWX_UNLOCK(sc);
	err = iwx_read_firmware(sc);
	IWX_LOCK(sc);
	if (err)
		return err;

	err = iwx_start_fw(sc);
	if (err)
		return err;

	/* AX210+ devices additionally require a platform NVM image. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_load_pnvm(sc);
		if (err)
			return err;
	}

	iwx_post_alive(sc);

	return 0;
}
4032 
/*
 * Run the firmware's init phase: load the ucode, send the extended
 * config and NVM-access-complete commands, then wait for the
 * IWX_INIT_COMPLETE notification.  When 'readnvm' is set, also read
 * the NVM and install the MAC address into ic_macaddr.
 * Returns 0 on success or an errno value.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* With the radio disabled we can only proceed to read the NVM. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4104 
4105 static int
iwx_config_ltr(struct iwx_softc * sc)4106 iwx_config_ltr(struct iwx_softc *sc)
4107 {
4108 	struct iwx_ltr_config_cmd cmd = {
4109 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4110 	};
4111 
4112 	if (!sc->sc_ltr_enabled)
4113 		return 0;
4114 
4115 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4116 }
4117 
4118 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4119 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4120     bus_dma_segment_t *seg)
4121 {
4122 	struct iwx_rx_data *data = &ring->data[idx];
4123 
4124 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4125 		struct iwx_rx_transfer_desc *desc = ring->desc;
4126 		desc[idx].rbid = htole16(idx & 0xffff);
4127 		desc[idx].addr = htole64((*seg).ds_addr);
4128 		bus_dmamap_sync(ring->data_dmat, data->map,
4129 		    BUS_DMASYNC_PREWRITE);
4130 	} else {
4131 		((uint64_t *)ring->desc)[idx] =
4132 		    htole64((*seg).ds_addr);
4133 		bus_dmamap_sync(ring->data_dmat, data->map,
4134 		    BUS_DMASYNC_PREWRITE);
4135 	}
4136 }
4137 
/*
 * Allocate a fresh mbuf cluster for RX ring slot 'idx', map it for
 * DMA and update the ring descriptor.  If the slot already held a
 * mapped mbuf, a failure to load the replacement is fatal because
 * the hardware descriptor would be left pointing at unmapped memory.
 * The 'size' argument is currently unused; IWX_RBUF_SIZE clusters
 * are always allocated.  Returns 0 on success or an errno value.
 */
static int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;
	bus_dma_segment_t seg;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	/* Replacing an existing buffer: unmap it first. */
	if (data->m != NULL) {
		bus_dmamap_unload(ring->data_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx, &seg);
	return 0;
}
4175 
4176 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4177 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4178     struct iwx_rx_mpdu_desc *desc)
4179 {
4180 	int energy_a, energy_b;
4181 
4182 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4183 		energy_a = desc->v3.energy_a;
4184 		energy_b = desc->v3.energy_b;
4185 	} else {
4186 		energy_a = desc->v1.energy_a;
4187 		energy_b = desc->v1.energy_b;
4188 	}
4189 	energy_a = energy_a ? -energy_a : -256;
4190 	energy_b = energy_b ? -energy_b : -256;
4191 	return MAX(energy_a, energy_b);
4192 }
4193 
/*
 * Handle an RX PHY notification: cache the PHY information so it can
 * be consulted when the corresponding frame is processed.
 *
 * NOTE(review): the sync operates on the TX ring selected by the
 * command header's qid, with BUS_DMASYNC_PREREAD; a POSTREAD sync of
 * the RX buffer would normally be expected before reading pkt->data
 * -- confirm against the interrupt path that calls this.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4206 
4207 /*
4208  * Retrieve the average noise (in dBm) among receivers.
4209  */
4210 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4211 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4212 {
4213 	int i, total, nbant, noise;
4214 
4215 	total = nbant = noise = 0;
4216 	for (i = 0; i < 3; i++) {
4217 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4218 		if (noise) {
4219 			total += noise;
4220 			nbant++;
4221 		}
4222 	}
4223 
4224 	/* There should be at least one antenna but check anyway. */
4225 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4226 }
4227 
4228 #if 0
4229 int
4230 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4231     struct ieee80211_rxinfo *rxi)
4232 {
4233 	struct ieee80211com *ic = &sc->sc_ic;
4234 	struct ieee80211_key *k;
4235 	struct ieee80211_frame *wh;
4236 	uint64_t pn, *prsc;
4237 	uint8_t *ivp;
4238 	uint8_t tid;
4239 	int hdrlen, hasqos;
4240 
4241 	wh = mtod(m, struct ieee80211_frame *);
4242 	hdrlen = ieee80211_get_hdrlen(wh);
4243 	ivp = (uint8_t *)wh + hdrlen;
4244 
4245 	/* find key for decryption */
4246 	k = ieee80211_get_rxkey(ic, m, ni);
4247 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4248 		return 1;
4249 
4250 	/* Check that ExtIV bit is be set. */
4251 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4252 		return 1;
4253 
4254 	hasqos = ieee80211_has_qos(wh);
4255 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4256 	prsc = &k->k_rsc[tid];
4257 
4258 	/* Extract the 48-bit PN from the CCMP header. */
4259 	pn = (uint64_t)ivp[0]       |
4260 	     (uint64_t)ivp[1] <<  8 |
4261 	     (uint64_t)ivp[4] << 16 |
4262 	     (uint64_t)ivp[5] << 24 |
4263 	     (uint64_t)ivp[6] << 32 |
4264 	     (uint64_t)ivp[7] << 40;
4265 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4266 		if (pn < *prsc) {
4267 			ic->ic_stats.is_ccmp_replays++;
4268 			return 1;
4269 		}
4270 	} else if (pn <= *prsc) {
4271 		ic->ic_stats.is_ccmp_replays++;
4272 		return 1;
4273 	}
4274 	/* Last seen packet number is updated in ieee80211_inputm(). */
4275 
4276 	/*
4277 	 * Some firmware versions strip the MIC, and some don't. It is not
4278 	 * clear which of the capability flags could tell us what to expect.
4279 	 * For now, keep things simple and just leave the MIC in place if
4280 	 * it is present.
4281 	 *
4282 	 * The IV will be stripped by ieee80211_inputm().
4283 	 */
4284 	return 0;
4285 }
4286 #endif
4287 
4288 static int
iwx_rx_hwdecrypt(struct iwx_softc * sc,struct mbuf * m,uint32_t rx_pkt_status)4289 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4290 {
4291 	struct ieee80211_frame *wh;
4292 	int ret = 0;
4293 	uint8_t type, subtype;
4294 
4295 	wh = mtod(m, struct ieee80211_frame *);
4296 
4297 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4298 	if (type == IEEE80211_FC0_TYPE_CTL) {
4299 		return 0;
4300 	}
4301 
4302 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4303 	if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4304 		return 0;
4305 	}
4306 
4307 
4308 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4309 	    IEEE80211_FC0_TYPE_CTL)
4310 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4311 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4312 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4313 			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4314 			ret = 1;
4315 			goto out;
4316 		}
4317 		/* Check whether decryption was successful or not. */
4318 		if ((rx_pkt_status &
4319 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4320 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4321 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4322 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4323 			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4324 			ret = 1;
4325 			goto out;
4326 		}
4327 	}
4328 	out:
4329 	return ret;
4330 }
4331 
/*
 * Hand a received frame to net80211, filling in radiotap fields when
 * a sniffer is active.  Consumes 'm'.  The softc lock is dropped
 * around the net80211 input calls since they may re-enter the driver.
 *
 * 'chanidx' arrives as a hardware channel number and is translated
 * into an index into ic->ic_channels.  'rssi' must already be
 * normalized by the caller.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Translate the IEEE channel number into an ic_channels index. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0	/* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* The rate_n_flags encoding differs by firmware API version. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			tap->wr_rate = (0x80 | mcs);
		} else {
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
			// XXX hack - this needs rebased with the new rate stuff anyway
			/* NOTE(review): the switch result above is discarded
			 * by this assignment; confirm intent when rebasing. */
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may sleep or re-enter the driver; drop the lock. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4467 
4468 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4469 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4470     size_t maxlen)
4471 {
4472 	struct ieee80211com *ic = &sc->sc_ic;
4473 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4474 	struct ieee80211_node *ni = vap->iv_bss;
4475 	struct ieee80211_key *k;
4476 	struct ieee80211_rx_stats rxs;
4477 	struct iwx_rx_mpdu_desc *desc;
4478 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4479 	int rssi;
4480 	uint8_t chanidx;
4481 	uint16_t phy_info;
4482 	size_t desc_size;
4483 	int pad = 0;
4484 
4485 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4486 		desc_size = sizeof(*desc);
4487 	else
4488 		desc_size = IWX_RX_DESC_SIZE_V1;
4489 
4490 	if (maxlen < desc_size) {
4491 		m_freem(m);
4492 		return; /* drop */
4493 	}
4494 
4495 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4496 
4497 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4498 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4499 		printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4500 		m_freem(m);
4501 		return; /* drop */
4502 	}
4503 
4504 	len = le16toh(desc->mpdu_len);
4505 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4506 		/* Allow control frames in monitor mode. */
4507 		if (len < sizeof(struct ieee80211_frame_cts)) {
4508 			m_freem(m);
4509 			return;
4510 		}
4511 
4512 	} else if (len < sizeof(struct ieee80211_frame)) {
4513 		m_freem(m);
4514 		return;
4515 	}
4516 	if (len > maxlen - desc_size) {
4517 		m_freem(m);
4518 		return;
4519 	}
4520 
4521 	// TODO: arithmetic on a pointer to void is a GNU extension
4522 	m->m_data = (char *)pktdata + desc_size;
4523 	m->m_pkthdr.len = m->m_len = len;
4524 
4525 	/* Account for padding following the frame header. */
4526 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4527 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4528 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4529 		if (type == IEEE80211_FC0_TYPE_CTL) {
4530 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4531 			case IEEE80211_FC0_SUBTYPE_CTS:
4532 				hdrlen = sizeof(struct ieee80211_frame_cts);
4533 				break;
4534 			case IEEE80211_FC0_SUBTYPE_ACK:
4535 				hdrlen = sizeof(struct ieee80211_frame_ack);
4536 				break;
4537 			default:
4538 				hdrlen = sizeof(struct ieee80211_frame_min);
4539 				break;
4540 			}
4541 		} else
4542 			hdrlen = ieee80211_hdrsize(wh);
4543 
4544 		if ((le16toh(desc->status) &
4545 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4546 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4547 			// CCMP header length
4548 			hdrlen += 8;
4549 		}
4550 
4551 		memmove(m->m_data + 2, m->m_data, hdrlen);
4552 		m_adj(m, 2);
4553 
4554 	}
4555 
4556 	if ((le16toh(desc->status) &
4557 	    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4558 	    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4559 		pad = 1;
4560 	}
4561 
4562 //	/*
4563 //	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4564 //	 * in place for each subframe. But it leaves the 'A-MSDU present'
4565 //	 * bit set in the frame header. We need to clear this bit ourselves.
4566 //	 * (XXX This workaround is not required on AX200/AX201 devices that
4567 //	 * have been tested by me, but it's unclear when this problem was
4568 //	 * fixed in the hardware. It definitely affects the 9k generation.
4569 //	 * Leaving this in place for now since some 9k/AX200 hybrids seem
4570 //	 * to exist that we may eventually add support for.)
4571 //	 *
4572 //	 * And we must allow the same CCMP PN for subframes following the
4573 //	 * first subframe. Otherwise they would be discarded as replays.
4574 //	 */
4575 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4576 		DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
4577 //		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4578 //		uint8_t subframe_idx = (desc->amsdu_info &
4579 //		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4580 //		if (subframe_idx > 0)
4581 //			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4582 //		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4583 //		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4584 //			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4585 //			    struct ieee80211_qosframe_addr4 *);
4586 //			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4587 //		} else if (ieee80211_has_qos(wh) &&
4588 //		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4589 //			struct ieee80211_qosframe *qwh = mtod(m,
4590 //			    struct ieee80211_qosframe *);
4591 //			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4592 //		}
4593 	}
4594 
4595 	/*
4596 	 * Verify decryption before duplicate detection. The latter uses
4597 	 * the TID supplied in QoS frame headers and this TID is implicitly
4598 	 * verified as part of the CCMP nonce.
4599 	 */
4600 	k = ieee80211_crypto_get_txkey(ni, m);
4601 	if (k != NULL &&
4602 	    (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4603 	    iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4604 		DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4605 		m_freem(m);
4606 		return;
4607 	}
4608 
4609 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4610 		rate_n_flags = le32toh(desc->v3.rate_n_flags);
4611 		chanidx = desc->v3.channel;
4612 		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4613 	} else {
4614 		rate_n_flags = le32toh(desc->v1.rate_n_flags);
4615 		chanidx = desc->v1.channel;
4616 		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4617 	}
4618 
4619 	phy_info = le16toh(desc->phy_info);
4620 
4621 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
4622 	rssi = (0 - IWX_MIN_DBM) + rssi;		/* normalize */
4623 	rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM));	/* clip to max. 100% */
4624 
4625 	memset(&rxs, 0, sizeof(rxs));
4626 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4627 	rxs.r_flags |= IEEE80211_R_BAND;
4628 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4629 	rxs.r_flags |= IEEE80211_R_RSSI | IEEE80211_R_C_RSSI;
4630 	rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4631 
4632 	rxs.c_ieee = chanidx;
4633 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4634 	    chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4635 	rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4636 	rxs.c_rx_tsf = device_timestamp;
4637 	rxs.c_chain = chanidx;
4638 
4639 	/* rssi is in 1/2db units */
4640 	rxs.c_rssi = rssi * 2;
4641 	rxs.c_nf = sc->sc_noise;
4642 
4643 	if (pad) {
4644 		rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4645 		rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4646 	}
4647 
4648 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
4649 		printf("%s: ieee80211_add_rx_params failed\n", __func__);
4650 		return;
4651 	}
4652 
4653 	ieee80211_add_rx_params(m, &rxs);
4654 
4655 #if 0
4656 	if (iwx_rx_reorder(sc, m, chanidx, desc,
4657 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4658 	    rate_n_flags, device_timestamp, &rxi, ml))
4659 		return;
4660 #endif
4661 
4662 	if (pad) {
4663 #define TRIM 8
4664 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4665 		hdrlen = ieee80211_hdrsize(wh);
4666 		memmove(m->m_data + TRIM, m->m_data, hdrlen);
4667 		m_adj(m, TRIM);
4668 #undef TRIM
4669 	}
4670 
4671 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4672 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4673 	    rate_n_flags, device_timestamp, rssi);
4674 }
4675 
4676 static void
iwx_clear_tx_desc(struct iwx_softc * sc,struct iwx_tx_ring * ring,int idx)4677 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4678 {
4679 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
4680 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4681 	int i;
4682 
4683 	/* First TB is never cleared - it is bidirectional DMA data. */
4684 	for (i = 1; i < num_tbs; i++) {
4685 		struct iwx_tfh_tb *tb = &desc->tbs[i];
4686 		memset(tb, 0, sizeof(*tb));
4687 	}
4688 	desc->num_tbs = htole16(1);
4689 
4690 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4691 	    BUS_DMASYNC_PREWRITE);
4692 }
4693 
4694 static void
iwx_txd_done(struct iwx_softc * sc,struct iwx_tx_ring * ring,struct iwx_tx_data * txd)4695 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
4696     struct iwx_tx_data *txd)
4697 {
4698 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
4699 	bus_dmamap_unload(ring->data_dmat, txd->map);
4700 
4701 	ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
4702 	txd->m = NULL;
4703 	txd->in = NULL;
4704 }
4705 
4706 static void
iwx_txq_advance(struct iwx_softc * sc,struct iwx_tx_ring * ring,uint16_t idx)4707 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
4708 {
4709 	struct iwx_tx_data *txd;
4710 
4711 	while (ring->tail_hw != idx) {
4712 		txd = &ring->data[ring->tail];
4713 		if (txd->m != NULL) {
4714 			iwx_clear_tx_desc(sc, ring, ring->tail);
4715 			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
4716 			iwx_txd_done(sc, ring, txd);
4717 			ring->queued--;
4718 			if (ring->queued < 0)
4719 				panic("caught negative queue count");
4720 		}
4721 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4722 		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
4723 	}
4724 }
4725 
/*
 * Handle a Tx completion notification from firmware: account the frame
 * against the interface counters and reclaim all Tx ring slots up to the
 * ring index ("SSN") reported by firmware.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	/* Non-aggregation queues must report exactly one frame. */
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Agg queues: per-frame status array plus trailing SSN must fit. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* Progress was made on this queue; reset its watchdog. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	/* NOTE(review): assumes txd->m is non-NULL here — verify that a
	 * completion for an empty slot cannot reach this point. */
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 * The SSN sits unaligned right after the status array; fetch it
	 * with memcpy.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4787 
4788 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4789 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4790 {
4791 	if (ring->queued < iwx_lomark) {
4792 		sc->qfullmsk &= ~(1 << ring->qid);
4793 		if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4794 			/*
4795 			 * Well, we're in interrupt context, but then again
4796 			 * I guess net80211 does all sorts of stunts in
4797 			 * interrupt context, so maybe this is no biggie.
4798 			 */
4799 			iwx_start(sc);
4800 		}
4801 	}
4802 }
4803 
4804 static void
iwx_rx_compressed_ba(struct iwx_softc * sc,struct iwx_rx_packet * pkt)4805 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4806 {
4807 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4808 	struct ieee80211com *ic = &sc->sc_ic;
4809 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4810 	struct iwx_node *in = IWX_NODE(vap->iv_bss);
4811 	struct ieee80211_node *ni = &in->in_ni;
4812 	struct iwx_tx_ring *ring;
4813 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4814 	int qid;
4815 
4816 //	if (ic->ic_state != IEEE80211_S_RUN)
4817 //		return;
4818 
4819 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4820 		return;
4821 
4822 	if (ba_res->sta_id != IWX_STATION_ID)
4823 		return;
4824 
4825 	in = (void *)ni;
4826 
4827 	tfd_cnt = le16toh(ba_res->tfd_cnt);
4828 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4829 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4830 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4831 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
4832 		return;
4833 
4834 	for (i = 0; i < tfd_cnt; i++) {
4835 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4836 		uint8_t tid;
4837 
4838 		tid = ba_tfd->tid;
4839 		if (tid >= nitems(sc->aggqid))
4840 			continue;
4841 
4842 		qid = sc->aggqid[tid];
4843 		if (qid != htole16(ba_tfd->q_num))
4844 			continue;
4845 
4846 		ring = &sc->txq[qid];
4847 
4848 #if 0
4849 		ba = &ni->ni_tx_ba[tid];
4850 		if (ba->ba_state != IEEE80211_BA_AGREED)
4851 			continue;
4852 #endif
4853 		idx = le16toh(ba_tfd->tfd_index);
4854 		sc->sc_tx_timer[qid] = 0;
4855 		iwx_txq_advance(sc, ring, idx);
4856 		iwx_clear_oactive(sc, ring);
4857 	}
4858 }
4859 
4860 static void
iwx_rx_bmiss(struct iwx_softc * sc,struct iwx_rx_packet * pkt,struct iwx_rx_data * data)4861 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4862     struct iwx_rx_data *data)
4863 {
4864 	struct ieee80211com *ic = &sc->sc_ic;
4865 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4866 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4867 	uint32_t missed;
4868 
4869 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4870 	    (vap->iv_state != IEEE80211_S_RUN))
4871 		return;
4872 
4873 	bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4874 	    BUS_DMASYNC_POSTREAD);
4875 
4876 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4877 	if (missed > vap->iv_bmissthreshold) {
4878 		ieee80211_beacon_miss(ic);
4879 	}
4880 
4881 }
4882 
4883 static int
iwx_binding_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action)4884 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4885 {
4886 	struct iwx_binding_cmd cmd;
4887 	struct ieee80211com *ic = &sc->sc_ic;
4888 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4889 	struct iwx_vap *ivp = IWX_VAP(vap);
4890 	struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
4891 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4892 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4893 	uint32_t status;
4894 
4895 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
4896 		panic("binding already added");
4897 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4898 		panic("binding already removed");
4899 
4900 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
4901 		return EINVAL;
4902 
4903 	memset(&cmd, 0, sizeof(cmd));
4904 
4905 	cmd.id_and_color
4906 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4907 	cmd.action = htole32(action);
4908 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4909 
4910 	cmd.macs[0] = htole32(mac_id);
4911 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4912 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4913 
4914 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4915 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4916 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4917 	else
4918 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4919 
4920 	status = 0;
4921 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4922 	    &cmd, &status);
4923 	if (err == 0 && status != 0)
4924 		err = EIO;
4925 
4926 	return err;
4927 }
4928 
4929 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4930 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4931 {
4932 	int ctlchan = ieee80211_chan2ieee(ic, chan);
4933 	int midpoint = chan->ic_vht_ch_freq1;
4934 
4935 	/*
4936 	 * The FW is expected to check the control channel position only
4937 	 * when in HT/VHT and the channel width is not 20MHz. Return
4938 	 * this value as the default one:
4939 	 */
4940 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4941 
4942 	switch (ctlchan - midpoint) {
4943 	case -6:
4944 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
4945 		break;
4946 	case -2:
4947 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4948 		break;
4949 	case 2:
4950 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4951 		break;
4952 	case 6:
4953 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
4954 		break;
4955 	default:
4956 		break;
4957 	}
4958 
4959 	return pos;
4960 }
4961 
/*
 * Build and send a PHY context command (API versions 3/4) using the
 * "ultra high band" layout, which embeds the larger fw_channel_info
 * struct. Field order and endianness are firmware ABI; do not reorder.
 */
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* CDB firmware runs 2GHz on one LMAC and 5GHz on the other. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/*
	 * Channel width and control-channel position are derived from
	 * the channel flags. NOTE(review): the 'sco' and 'vht_chan_width'
	 * parameters are unused in this variant — confirm this is
	 * intentional versus the disabled non-UHB variant below.
	 */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Older command versions carry the Rx chain configuration here;
	 * with RLC_CONFIG_CMD v2 the firmware manages chains itself.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5015 
5016 #if 0
5017 int
5018 iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5019     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5020     uint8_t vht_chan_width, int cmdver)
5021 {
5022 	struct ieee80211com *ic = &sc->sc_ic;
5023 	struct iwx_phy_context_cmd cmd;
5024 	uint8_t active_cnt, idle_cnt;
5025 	struct ieee80211_channel *chan = ctxt->channel;
5026 
5027 	memset(&cmd, 0, sizeof(cmd));
5028 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5029 	    ctxt->color));
5030 	cmd.action = htole32(action);
5031 
5032 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5033 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5034 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5035 	else
5036 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5037 
5038 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5039 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5040 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5041 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5042 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5043 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5044 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5045 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5046 			/* secondary chan above -> control chan below */
5047 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5048 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5049 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5050 			/* secondary chan below -> control chan above */
5051 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5052 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5053 		} else {
5054 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5055 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5056 		}
5057 	} else {
5058 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5059 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5060 	}
5061 
5062 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5063 	    IWX_RLC_CONFIG_CMD) != 2) {
5064 		idle_cnt = chains_static;
5065 		active_cnt = chains_dynamic;
5066 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5067 		    IWX_PHY_RX_CHAIN_VALID_POS);
5068 		cmd.rxchain_info |= htole32(idle_cnt <<
5069 		    IWX_PHY_RX_CHAIN_CNT_POS);
5070 		cmd.rxchain_info |= htole32(active_cnt <<
5071 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5072 	}
5073 
5074 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5075 }
5076 #endif
5077 
5078 static int
iwx_phy_ctxt_cmd(struct iwx_softc * sc,struct iwx_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint32_t apply_time,uint8_t sco,uint8_t vht_chan_width)5079 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5080     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5081     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5082 {
5083 	int cmdver;
5084 
5085 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5086 	if (cmdver != 3 && cmdver != 4) {
5087 		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5088 		    DEVNAME(sc));
5089 		return ENOTSUP;
5090 	}
5091 
5092 	/*
5093 	 * Intel increased the size of the fw_channel_info struct and neglected
5094 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5095 	 * member in the middle.
5096 	 * To keep things simple we use a separate function to handle the larger
5097 	 * variant of the phy context command.
5098 	 */
5099 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5100 		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5101 		    chains_dynamic, action, sco, vht_chan_width, cmdver);
5102 	} else
5103 		panic("Unsupported old hardware contact thj@");
5104 
5105 #if 0
5106 	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5107 	    action, sco, vht_chan_width, cmdver);
5108 #endif
5109 }
5110 
5111 static int
iwx_send_cmd(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5112 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5113 {
5114 #ifdef IWX_DEBUG
5115         iwx_bbl_add_entry(hcmd->id, IWX_BBL_CMD_TX, ticks);
5116 #endif
5117 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5118 	struct iwx_tfh_tfd *desc;
5119 	struct iwx_tx_data *txdata;
5120 	struct iwx_device_cmd *cmd;
5121 	struct mbuf *m;
5122 	bus_addr_t paddr;
5123 	uint64_t addr;
5124 	int err = 0, i, paylen, off/*, s*/;
5125 	int idx, code, async, group_id;
5126 	size_t hdrlen, datasz;
5127 	uint8_t *data;
5128 	int generation = sc->sc_generation;
5129 	bus_dma_segment_t seg[10];
5130 	int nsegs;
5131 
5132 	code = hcmd->id;
5133 	async = hcmd->flags & IWX_CMD_ASYNC;
5134 	idx = ring->cur;
5135 
5136 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5137 		paylen += hcmd->len[i];
5138 	}
5139 
5140 	/* If this command waits for a response, allocate response buffer. */
5141 	hcmd->resp_pkt = NULL;
5142 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5143 		uint8_t *resp_buf;
5144 		KASSERT(!async, ("async command want response"));
5145 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5146 		    ("wrong pkt len 1"));
5147 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5148 		    ("wrong pkt len 2"));
5149 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5150 			return ENOSPC;
5151 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5152 		    M_NOWAIT | M_ZERO);
5153 		if (resp_buf == NULL)
5154 			return ENOMEM;
5155 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5156 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5157 	} else {
5158 		sc->sc_cmd_resp_pkt[idx] = NULL;
5159 	}
5160 
5161 	desc = &ring->desc[idx];
5162 	txdata = &ring->data[idx];
5163 
5164 	/*
5165 	 * XXX Intel inside (tm)
5166 	 * Firmware API versions >= 50 reject old-style commands in
5167 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5168 	 * that such commands were in the LONG_GROUP instead in order
5169 	 * for firmware to accept them.
5170 	 */
5171 	if (iwx_cmd_groupid(code) == 0) {
5172 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5173 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5174 	} else
5175 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5176 
5177 	group_id = iwx_cmd_groupid(code);
5178 
5179 	hdrlen = sizeof(cmd->hdr_wide);
5180 	datasz = sizeof(cmd->data_wide);
5181 
5182 	if (paylen > datasz) {
5183 		/* Command is too large to fit in pre-allocated space. */
5184 		size_t totlen = hdrlen + paylen;
5185 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5186 			printf("%s: firmware command too long (%zd bytes)\n",
5187 			    DEVNAME(sc), totlen);
5188 			err = EINVAL;
5189 			goto out;
5190 		}
5191 		if (totlen > IWX_RBUF_SIZE)
5192 			panic("totlen > IWX_RBUF_SIZE");
5193 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5194 		if (m == NULL) {
5195 			printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5196 			    DEVNAME(sc), IWX_RBUF_SIZE);
5197 			err = ENOMEM;
5198 			goto out;
5199 		}
5200 		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5201 		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5202 		    seg, &nsegs, BUS_DMA_NOWAIT);
5203 		if (nsegs > 20)
5204 			panic("nsegs > 20");
5205 		DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5206 		if (err) {
5207 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5208 			    DEVNAME(sc), totlen);
5209 			m_freem(m);
5210 			goto out;
5211 		}
5212 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5213 		cmd = mtod(m, struct iwx_device_cmd *);
5214 		paddr = seg[0].ds_addr;
5215 	} else {
5216 		cmd = &ring->cmd[idx];
5217 		paddr = txdata->cmd_paddr;
5218 	}
5219 
5220 	memset(cmd, 0, sizeof(*cmd));
5221 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5222 	cmd->hdr_wide.group_id = group_id;
5223 	cmd->hdr_wide.qid = ring->qid;
5224 	cmd->hdr_wide.idx = idx;
5225 	cmd->hdr_wide.length = htole16(paylen);
5226 	cmd->hdr_wide.version = iwx_cmd_version(code);
5227 	data = cmd->data_wide;
5228 
5229 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5230 		if (hcmd->len[i] == 0)
5231 			continue;
5232 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5233 		off += hcmd->len[i];
5234 	}
5235 	KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5236 
5237 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5238 	addr = htole64(paddr);
5239 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5240 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5241 		DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5242 		    paylen));
5243 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5244 		    IWX_FIRST_TB_SIZE);
5245 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5246 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5247 		desc->num_tbs = htole16(2);
5248 	} else
5249 		desc->num_tbs = htole16(1);
5250 
5251 	if (paylen > datasz) {
5252 		bus_dmamap_sync(ring->data_dmat, txdata->map,
5253 		    BUS_DMASYNC_PREWRITE);
5254 	} else {
5255 		bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5256 		    BUS_DMASYNC_PREWRITE);
5257 	}
5258 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5259 	    BUS_DMASYNC_PREWRITE);
5260 
5261 	/* Kick command ring. */
5262 	ring->queued++;
5263 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5264 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5265 	DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5266 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5267 
5268 	if (!async) {
5269 		err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5270 		if (err == 0) {
5271 			/* if hardware is no longer up, return error */
5272 			if (generation != sc->sc_generation) {
5273 				err = ENXIO;
5274 				goto out;
5275 			}
5276 
5277 			/* Response buffer will be freed in iwx_free_resp(). */
5278 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5279 			sc->sc_cmd_resp_pkt[idx] = NULL;
5280 		} else if (generation == sc->sc_generation) {
5281 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5282 			sc->sc_cmd_resp_pkt[idx] = NULL;
5283 		}
5284 	}
5285 out:
5286 	return err;
5287 }
5288 
5289 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5290 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5291     uint16_t len, const void *data)
5292 {
5293 	struct iwx_host_cmd cmd = {
5294 		.id = id,
5295 		.len = { len, },
5296 		.data = { data, },
5297 		.flags = flags,
5298 	};
5299 
5300 	return iwx_send_cmd(sc, &cmd);
5301 }
5302 
5303 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5304 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5305     uint32_t *status)
5306 {
5307 	struct iwx_rx_packet *pkt;
5308 	struct iwx_cmd_response *resp;
5309 	int err, resp_len;
5310 
5311 	KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5312 	cmd->flags |= IWX_CMD_WANT_RESP;
5313 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5314 
5315 	err = iwx_send_cmd(sc, cmd);
5316 	if (err)
5317 		return err;
5318 
5319 	pkt = cmd->resp_pkt;
5320 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5321 		return EIO;
5322 
5323 	resp_len = iwx_rx_packet_payload_len(pkt);
5324 	if (resp_len != sizeof(*resp)) {
5325 		iwx_free_resp(sc, cmd);
5326 		return EIO;
5327 	}
5328 
5329 	resp = (void *)pkt->data;
5330 	*status = le32toh(resp->status);
5331 	iwx_free_resp(sc, cmd);
5332 	return err;
5333 }
5334 
5335 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5336 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5337     const void *data, uint32_t *status)
5338 {
5339 	struct iwx_host_cmd cmd = {
5340 		.id = id,
5341 		.len = { len, },
5342 		.data = { data, },
5343 	};
5344 
5345 	return iwx_send_cmd_status(sc, &cmd, status);
5346 }
5347 
/*
 * Release the response buffer attached to a completed host command.
 * Only valid for commands sent with IWX_CMD_WANT_RESP.
 */
static void
iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
	    ("hcmd flags !IWX_CMD_WANT_RESP"));
	free(hcmd->resp_pkt, M_DEVBUF);
	hcmd->resp_pkt = NULL;
}
5356 
5357 static void
iwx_cmd_done(struct iwx_softc * sc,int qid,int idx,int code)5358 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5359 {
5360 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5361 	struct iwx_tx_data *data;
5362 
5363 	if (qid != IWX_DQA_CMD_QUEUE) {
5364 		return;	/* Not a command ack. */
5365 	}
5366 
5367 	data = &ring->data[idx];
5368 
5369 	if (data->m != NULL) {
5370 		bus_dmamap_sync(ring->data_dmat, data->map,
5371 		    BUS_DMASYNC_POSTWRITE);
5372 		bus_dmamap_unload(ring->data_dmat, data->map);
5373 		m_freem(data->m);
5374 		data->m = NULL;
5375 	}
5376 	wakeup(&ring->desc[idx]);
5377 
5378 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5379 	if (ring->queued == 0) {
5380 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5381 			DEVNAME(sc), code));
5382 	} else if (ring->queued > 0)
5383 		ring->queued--;
5384 }
5385 
5386 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5387 iwx_fw_rateidx_ofdm(uint8_t rval)
5388 {
5389 	/* Firmware expects indices which match our 11a rate set. */
5390 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5391 	int i;
5392 
5393 	for (i = 0; i < rs->rs_nrates; i++) {
5394 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5395 			return i;
5396 	}
5397 
5398 	return 0;
5399 }
5400 
5401 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5402 iwx_fw_rateidx_cck(uint8_t rval)
5403 {
5404 	/* Firmware expects indices which match our 11b rate set. */
5405 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5406 	int i;
5407 
5408 	for (i = 0; i < rs->rs_nrates; i++) {
5409 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5410 			return i;
5411 	}
5412 
5413 	return 0;
5414 }
5415 
5416 static int
iwx_min_basic_rate(struct ieee80211com * ic)5417 iwx_min_basic_rate(struct ieee80211com *ic)
5418 {
5419 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5420 	struct ieee80211_node *ni = vap->iv_bss;
5421 	struct ieee80211_rateset *rs = &ni->ni_rates;
5422 	struct ieee80211_channel *c = ni->ni_chan;
5423 	int i, min, rval;
5424 
5425 	min = -1;
5426 
5427 	if (c == IEEE80211_CHAN_ANYC) {
5428 		printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5429 		return -1;
5430 	}
5431 
5432 	for (i = 0; i < rs->rs_nrates; i++) {
5433 		if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5434 			continue;
5435 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5436 		if (min == -1)
5437 			min = rval;
5438 		else if (rval < min)
5439 			min = rval;
5440 	}
5441 
5442 	/* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5443 	if (min == -1)
5444 		min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5445 
5446 	return min;
5447 }
5448 
5449 /*
5450  * Determine the Tx command flags and Tx rate+flags to use.
5451  * Return the selected Tx rate.
5452  */
5453 static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc * sc,struct iwx_node * in,struct ieee80211_frame * wh,uint16_t * flags,uint32_t * rate_n_flags,struct mbuf * m)5454 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5455     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5456     struct mbuf *m)
5457 {
5458 	struct ieee80211com *ic = &sc->sc_ic;
5459 	struct ieee80211_node *ni = &in->in_ni;
5460 	struct ieee80211_rateset *rs = &ni->ni_rates;
5461 	const struct iwx_rate *rinfo = NULL;
5462 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5463 	int ridx = iwx_min_basic_rate(ic);
5464 	int min_ridx, rate_flags;
5465 	uint8_t rval;
5466 
5467 	/* We're in the process of clearing the node, no channel already */
5468 	if (ridx == -1)
5469 		return NULL;
5470 
5471 	min_ridx = iwx_rval2ridx(ridx);
5472 
5473 	*flags = 0;
5474 
5475 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5476 	    type != IEEE80211_FC0_TYPE_DATA) {
5477 		/* for non-data, use the lowest supported rate */
5478 		ridx = min_ridx;
5479 		*flags |= IWX_TX_FLAGS_CMD_RATE;
5480 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5481 		ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5482 		    & ~IEEE80211_RATE_MCS];
5483 	} else {
5484 		rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5485 		    & IEEE80211_RATE_VAL);
5486 		ridx = iwx_rval2ridx(rval);
5487 		if (ridx < min_ridx)
5488 			ridx = min_ridx;
5489 	}
5490 
5491 	if (m->m_flags & M_EAPOL)
5492 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
5493 
5494 	rinfo = &iwx_rates[ridx];
5495 
5496 	/*
5497 	 * Do not fill rate_n_flags if firmware controls the Tx rate.
5498 	 * For data frames we rely on Tx rate scaling in firmware by default.
5499 	 */
5500 	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5501 		*rate_n_flags = 0;
5502 		return rinfo;
5503 	}
5504 
5505 	/*
5506 	 * Forcing a CCK/OFDM legacy rate is important for management frames.
5507 	 * Association will only succeed if we do this correctly.
5508 	 */
5509 
5510 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5511 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5512 	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5513 	if (IWX_RIDX_IS_CCK(ridx)) {
5514 		if (sc->sc_rate_n_flags_version >= 2)
5515 			rate_flags |= IWX_RATE_MCS_CCK_MSK;
5516 		else
5517 			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5518 	} else if (sc->sc_rate_n_flags_version >= 2)
5519 		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5520 
5521 	rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5522 	    & IEEE80211_RATE_VAL);
5523 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5524 	    rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5525 
5526 	if (sc->sc_rate_n_flags_version >= 2) {
5527 		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5528 			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5529 			    IWX_RATE_LEGACY_RATE_MSK);
5530 		} else {
5531 			rate_flags |= (iwx_fw_rateidx_cck(rval) &
5532 			    IWX_RATE_LEGACY_RATE_MSK);
5533 		}
5534 	} else
5535 		rate_flags |= rinfo->plcp;
5536 
5537 	*rate_n_flags = rate_flags;
5538 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5539 	    __func__, __LINE__,*flags);
5540 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5541 	    __func__, __LINE__, *rate_n_flags);
5542 
5543 	if (sc->sc_debug & IWX_DEBUG_TXRATE)
5544 		print_ratenflags(__func__, __LINE__,
5545 		    *rate_n_flags, sc->sc_rate_n_flags_version);
5546 
5547 	return rinfo;
5548 }
5549 
5550 static void
iwx_tx_update_byte_tbl(struct iwx_softc * sc,struct iwx_tx_ring * txq,int idx,uint16_t byte_cnt,uint16_t num_tbs)5551 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
5552     int idx, uint16_t byte_cnt, uint16_t num_tbs)
5553 {
5554 	uint8_t filled_tfd_size, num_fetch_chunks;
5555 	uint16_t len = byte_cnt;
5556 	uint16_t bc_ent;
5557 
5558 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5559 			  num_tbs * sizeof(struct iwx_tfh_tb);
5560 	/*
5561 	 * filled_tfd_size contains the number of filled bytes in the TFD.
5562 	 * Dividing it by 64 will give the number of chunks to fetch
5563 	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
5564 	 * If, for example, TFD contains only 3 TBs then 32 bytes
5565 	 * of the TFD are used, and only one chunk of 64 bytes should
5566 	 * be fetched
5567 	 */
5568 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
5569 
5570 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5571 		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
5572 		/* Starting from AX210, the HW expects bytes */
5573 		bc_ent = htole16(len | (num_fetch_chunks << 14));
5574 		scd_bc_tbl[idx].tfd_offset = bc_ent;
5575 	} else {
5576 		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5577 		/* Before AX210, the HW expects DW */
5578 		len = howmany(len, 4);
5579 		bc_ent = htole16(len | (num_fetch_chunks << 12));
5580 		scd_bc_tbl->tfd_offset[idx] = bc_ent;
5581 	}
5582 
5583 	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
5584 }
5585 
5586 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5587 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5588 {
5589 	struct ieee80211com *ic = &sc->sc_ic;
5590 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5591 	struct iwx_node *in = (void *)ni;
5592 	struct iwx_tx_ring *ring;
5593 	struct iwx_tx_data *data;
5594 	struct iwx_tfh_tfd *desc;
5595 	struct iwx_device_cmd *cmd;
5596 	struct ieee80211_frame *wh;
5597 	struct ieee80211_key *k = NULL;
5598 	const struct iwx_rate *rinfo;
5599 	uint64_t paddr;
5600 	u_int hdrlen;
5601 	uint32_t rate_n_flags;
5602 	uint16_t num_tbs, flags, offload_assist = 0;
5603 	uint8_t type, subtype;
5604 	int i, totlen, err, pad, qid;
5605 #define IWM_MAX_SCATTER 20
5606 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5607 	int nsegs;
5608 	struct mbuf *m1;
5609 	size_t txcmd_size;
5610 
5611 	wh = mtod(m, struct ieee80211_frame *);
5612 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5613 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5614 	hdrlen = ieee80211_anyhdrsize(wh);
5615 
5616 	qid = sc->first_data_qid;
5617 
5618 	/* Put QoS frames on the data queue which maps to their TID. */
5619 	if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
5620 		uint16_t qos = ieee80211_gettid(wh);
5621 		uint8_t tid = qos & IEEE80211_QOS_TID;
5622 #if 0
5623 		/*
5624 		 * XXX-THJ: TODO when we enable ba we need to manage the
5625 		 * mappings
5626 		 */
5627 		struct ieee80211_tx_ba *ba;
5628 		ba = &ni->ni_tx_ba[tid];
5629 
5630 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5631 		    type == IEEE80211_FC0_TYPE_DATA &&
5632 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5633 		    subtype != IEEE80211_FC0_SUBTYPE_BAR &&
5634 		    sc->aggqid[tid] != 0  /*&&
5635 		    ba->ba_state == IEEE80211_BA_AGREED*/) {
5636 			qid = sc->aggqid[tid];
5637 #else
5638 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5639 		    type == IEEE80211_FC0_TYPE_DATA &&
5640 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5641 		    sc->aggqid[tid] != 0) {
5642 			qid = sc->aggqid[tid];
5643 #endif
5644 		}
5645 	}
5646 
5647 	ring = &sc->txq[qid];
5648 	desc = &ring->desc[ring->cur];
5649 	memset(desc, 0, sizeof(*desc));
5650 	data = &ring->data[ring->cur];
5651 
5652 	cmd = &ring->cmd[ring->cur];
5653 	cmd->hdr.code = IWX_TX_CMD;
5654 	cmd->hdr.flags = 0;
5655 	cmd->hdr.qid = ring->qid;
5656 	cmd->hdr.idx = ring->cur;
5657 
5658 	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5659 	if (rinfo == NULL)
5660 		return EINVAL;
5661 
5662 	if (ieee80211_radiotap_active_vap(vap)) {
5663 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5664 
5665 		tap->wt_flags = 0;
5666 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5667 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5668 		tap->wt_rate = rinfo->rate;
5669 		if (k != NULL)
5670 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5671 		ieee80211_radiotap_tx(vap, m);
5672 	}
5673 
5674 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5675 		k = ieee80211_crypto_get_txkey(ni, m);
5676 		if (k == NULL) {
5677 			printf("%s: k is NULL!\n", __func__);
5678 			m_freem(m);
5679 			return (ENOBUFS);
5680 		} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5681 			k->wk_keytsc++;
5682 		} else {
5683 			k->wk_cipher->ic_encap(k, m);
5684 
5685 			/* 802.11 headers may have moved */
5686 			wh = mtod(m, struct ieee80211_frame *);
5687 			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5688 		}
5689 	} else
5690 		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5691 
5692 	totlen = m->m_pkthdr.len;
5693 
5694 	if (hdrlen & 3) {
5695 		/* First segment length must be a multiple of 4. */
5696 		pad = 4 - (hdrlen & 3);
5697 		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5698 	} else
5699 		pad = 0;
5700 
5701 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5702 		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5703 		memset(tx, 0, sizeof(*tx));
5704 		tx->len = htole16(totlen);
5705 		tx->offload_assist = htole32(offload_assist);
5706 		tx->flags = htole16(flags);
5707 		tx->rate_n_flags = htole32(rate_n_flags);
5708 		memcpy(tx->hdr, wh, hdrlen);
5709 		txcmd_size = sizeof(*tx);
5710 	} else {
5711 		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5712 		memset(tx, 0, sizeof(*tx));
5713 		tx->len = htole16(totlen);
5714 		tx->offload_assist = htole16(offload_assist);
5715 		tx->flags = htole32(flags);
5716 		tx->rate_n_flags = htole32(rate_n_flags);
5717 		memcpy(tx->hdr, wh, hdrlen);
5718 		txcmd_size = sizeof(*tx);
5719 	}
5720 #if IWX_DEBUG
5721 	iwx_bbl_add_entry(totlen, IWX_BBL_PKT_TX, ticks);
5722 #endif
5723 
5724 	/* Trim 802.11 header. */
5725 	m_adj(m, hdrlen);
5726 
5727 	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5728 	    &nsegs, BUS_DMA_NOWAIT);
5729 	if (err && err != EFBIG) {
5730 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5731 		m_freem(m);
5732 		return err;
5733 	}
5734 	if (err) {
5735 		/* Too many DMA segments, linearize mbuf. */
5736 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5737 		if (m1 == NULL) {
5738 			printf("%s: could not defrag mbufs\n", __func__);
5739 			m_freem(m);
5740 			return (ENOBUFS);
5741 		}
5742 		m = m1;
5743 		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5744 		    segs, &nsegs, BUS_DMA_NOWAIT);
5745 		if (err) {
5746 			printf("%s: can't map mbuf (error %d)\n", __func__,
5747 			    err);
5748 			m_freem(m);
5749 			return (err);
5750 		}
5751 	}
5752 	data->m = m;
5753 	data->in = in;
5754 
5755 	/* Fill TX descriptor. */
5756 	num_tbs = 2 + nsegs;
5757 	desc->num_tbs = htole16(num_tbs);
5758 
5759 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5760 	paddr = htole64(data->cmd_paddr);
5761 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5762 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5763 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5764 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5765 	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5766 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5767 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5768 
5769 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5770 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5771 
5772 	/* Other DMA segments are for data payload. */
5773 	for (i = 0; i < nsegs; i++) {
5774 		seg = &segs[i];
5775 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5776 		paddr = htole64(seg->ds_addr);
5777 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5778 		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5779 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5780 	}
5781 
5782 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5783 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5784 	    BUS_DMASYNC_PREWRITE);
5785 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5786 	    BUS_DMASYNC_PREWRITE);
5787 
5788 	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5789 
5790 	/* Kick TX ring. */
5791 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5792 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5793 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5794 
5795 	/* Mark TX ring as full if we reach a certain threshold. */
5796 	if (++ring->queued > iwx_himark) {
5797 		sc->qfullmsk |= 1 << ring->qid;
5798 	}
5799 
5800 	sc->sc_tx_timer[ring->qid] = 15;
5801 
5802 	return 0;
5803 }
5804 
/*
 * Ask firmware to flush pending frames for the given station and TID
 * mask, then advance the affected local Tx rings to the firmware's
 * post-flush read pointers.  Returns 0 on success, EIO if the firmware
 * response is missing or inconsistent, or the iwx_send_cmd() error.
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* Response must refer to the station we asked about. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/*
	 * For each queue the firmware reports flushed, catch our ring up
	 * to the firmware's read pointer so the Tx buffers get reclaimed.
	 * Queues with an unexpected qid or mismatched TID are skipped.
	 */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	/* Always release the response buffer allocated by iwx_send_cmd(). */
	iwx_free_resp(sc, &hcmd);
	return err;
}
5875 
5876 #define IWX_FLUSH_WAIT_MS	2000
5877 
5878 static int
5879 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5880 {
5881 	struct iwx_add_sta_cmd cmd;
5882 	int err;
5883 	uint32_t status;
5884 
5885 	memset(&cmd, 0, sizeof(cmd));
5886 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5887 	    in->in_color));
5888 	cmd.sta_id = IWX_STATION_ID;
5889 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5890 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5891 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5892 
5893 	status = IWX_ADD_STA_SUCCESS;
5894 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5895 	    sizeof(cmd), &cmd, &status);
5896 	if (err) {
5897 		printf("%s: could not update sta (error %d)\n",
5898 		    DEVNAME(sc), err);
5899 		return err;
5900 	}
5901 
5902 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5903 	case IWX_ADD_STA_SUCCESS:
5904 		break;
5905 	default:
5906 		err = EIO;
5907 		printf("%s: Couldn't %s draining for station\n",
5908 		    DEVNAME(sc), drain ? "enable" : "disable");
5909 		break;
5910 	}
5911 
5912 	return err;
5913 }
5914 
5915 static int
5916 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5917 {
5918 	int err;
5919 
5920 	IWX_ASSERT_LOCKED(sc);
5921 
5922 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5923 
5924 	err = iwx_drain_sta(sc, in, 1);
5925 	if (err)
5926 		goto done;
5927 
5928 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5929 	if (err) {
5930 		printf("%s: could not flush Tx path (error %d)\n",
5931 		    DEVNAME(sc), err);
5932 		goto done;
5933 	}
5934 
5935 	/*
5936 	 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the
5937 	 * fc drive rand has has been replaced in OpenBSD.
5938 	 */
5939 
5940 	err = iwx_drain_sta(sc, in, 0);
5941 done:
5942 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5943 	return err;
5944 }
5945 
5946 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
5947 
5948 static int
5949 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5950     struct iwx_beacon_filter_cmd *cmd)
5951 {
5952 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5953 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5954 }
5955 
5956 static int
5957 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5958 {
5959 	struct iwx_beacon_filter_cmd cmd = {
5960 		IWX_BF_CMD_CONFIG_DEFAULTS,
5961 		.bf_enable_beacon_filter = htole32(1),
5962 		.ba_enable_beacon_abort = htole32(enable),
5963 	};
5964 
5965 	if (!sc->sc_bf.bf_enabled)
5966 		return 0;
5967 
5968 	sc->sc_bf.ba_enabled = enable;
5969 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5970 }
5971 
5972 static void
5973 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5974     struct iwx_mac_power_cmd *cmd)
5975 {
5976 	struct ieee80211com *ic = &sc->sc_ic;
5977 	struct ieee80211_node *ni = &in->in_ni;
5978 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5979 	int dtim_period, dtim_msec, keep_alive;
5980 
5981 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5982 	    in->in_color));
5983 	if (vap->iv_dtim_period)
5984 		dtim_period = vap->iv_dtim_period;
5985 	else
5986 		dtim_period = 1;
5987 
5988 	/*
5989 	 * Regardless of power management state the driver must set
5990 	 * keep alive period. FW will use it for sending keep alive NDPs
5991 	 * immediately after association. Check that keep alive period
5992 	 * is at least 3 * DTIM.
5993 	 */
5994 	dtim_msec = dtim_period * ni->ni_intval;
5995 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5996 	keep_alive = roundup(keep_alive, 1000) / 1000;
5997 	cmd->keep_alive_seconds = htole16(keep_alive);
5998 
5999 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6000 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6001 }
6002 
6003 static int
6004 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6005 {
6006 	int err;
6007 	int ba_enable;
6008 	struct iwx_mac_power_cmd cmd;
6009 
6010 	memset(&cmd, 0, sizeof(cmd));
6011 
6012 	iwx_power_build_cmd(sc, in, &cmd);
6013 
6014 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6015 	    sizeof(cmd), &cmd);
6016 	if (err != 0)
6017 		return err;
6018 
6019 	ba_enable = !!(cmd.flags &
6020 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6021 	return iwx_update_beacon_abort(sc, in, ba_enable);
6022 }
6023 
6024 static int
6025 iwx_power_update_device(struct iwx_softc *sc)
6026 {
6027 	struct iwx_device_power_cmd cmd = { };
6028 	struct ieee80211com *ic = &sc->sc_ic;
6029 
6030 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6031 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6032 
6033 	return iwx_send_cmd_pdu(sc,
6034 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6035 }
#if 0
/*
 * Enable beacon filtering in firmware with the current beacon abort
 * setting.  Currently compiled out; nothing in this file calls it.
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
6054 static int
6055 iwx_disable_beacon_filter(struct iwx_softc *sc)
6056 {
6057 	struct iwx_beacon_filter_cmd cmd;
6058 	int err;
6059 
6060 	memset(&cmd, 0, sizeof(cmd));
6061 
6062 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6063 	if (err == 0)
6064 		sc->sc_bf.bf_enabled = 0;
6065 
6066 	return err;
6067 }
6068 
/*
 * Add our station to firmware (update != 0 modifies an existing one).
 * In monitor mode a general-purpose station with a wildcard address is
 * used instead of the link station.  For HT/VHT nodes this also
 * programs MIMO, channel width, maximum A-MPDU size and MPDU density.
 * Returns 0 on success, EIO on a firmware status failure, or the
 * command submission error.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	/* Adding (not updating) twice indicates a driver state bug. */
	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only set when the station is first added. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				/*
				 * HT: enable 2x2 MIMO only if the peer
				 * advertises any second-stream MCS (>7).
				 */
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* HT: take max A-MPDU size from the HT params IE. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Map the peer's minimum MPDU start spacing to FW flags. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6189 
6190 static int
6191 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6192 {
6193 	struct ieee80211com *ic = &sc->sc_ic;
6194 	struct iwx_rm_sta_cmd rm_sta_cmd;
6195 	int err;
6196 
6197 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6198 		panic("sta already removed");
6199 
6200 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6201 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6202 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6203 	else
6204 		rm_sta_cmd.sta_id = IWX_STATION_ID;
6205 
6206 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6207 	    &rm_sta_cmd);
6208 
6209 	return err;
6210 }
6211 
/*
 * Fully tear down our station: flush its Tx path, disable its Tx
 * queues (when the firmware requires explicit queue removal), remove
 * the station in firmware, and reset all related driver aggregation
 * state.  Returns 0 on success or the first error encountered.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Also disable every aggregation queue still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all block ack (aggregation) bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6284 
6285 static uint8_t
6286 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6287     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6288     int n_ssids, uint32_t channel_cfg_flags)
6289 {
6290 	struct ieee80211com *ic = &sc->sc_ic;
6291 	struct ieee80211_scan_state *ss = ic->ic_scan;
6292 	struct ieee80211_channel *c;
6293 	uint8_t nchan;
6294 	int j;
6295 
6296 	for (nchan = j = 0;
6297 	    j < ss->ss_last &&
6298 	    nchan < sc->sc_capa_n_scan_channels;
6299 	    j++) {
6300 		uint8_t channel_num;
6301 
6302 		c = ss->ss_chans[j];
6303 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6304 		if (isset(sc->sc_ucode_api,
6305 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6306 			chan->v2.channel_num = channel_num;
6307 			if (IEEE80211_IS_CHAN_2GHZ(c))
6308 				chan->v2.band = IWX_PHY_BAND_24;
6309 			else
6310 				chan->v2.band = IWX_PHY_BAND_5;
6311 			chan->v2.iter_count = 1;
6312 			chan->v2.iter_interval = 0;
6313 		} else {
6314 			chan->v1.channel_num = channel_num;
6315 			chan->v1.iter_count = 1;
6316 			chan->v1.iter_interval = htole16(0);
6317 		}
6318 		chan->flags |= htole32(channel_cfg_flags);
6319 		chan++;
6320 		nchan++;
6321 	}
6322 
6323 	return nchan;
6324 }
6325 
6326 static int
6327 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6328 {
6329 	struct ieee80211com *ic = &sc->sc_ic;
6330 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6331 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6332 	struct ieee80211_rateset *rs;
6333 	size_t remain = sizeof(preq->buf);
6334 	uint8_t *frm, *pos;
6335 
6336 	memset(preq, 0, sizeof(*preq));
6337 
6338 	if (remain < sizeof(*wh) + 2)
6339 		return ENOBUFS;
6340 
6341 	/*
6342 	 * Build a probe request frame.  Most of the following code is a
6343 	 * copy & paste of what is done in net80211.
6344 	 */
6345 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6346 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6347 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6348 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6349 	IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6350 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6351 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6352 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6353 
6354 	frm = (uint8_t *)(wh + 1);
6355 	*frm++ = IEEE80211_ELEMID_SSID;
6356 	*frm++ = 0;
6357 	/* hardware inserts SSID */
6358 
6359 	/* Tell the firmware where the MAC header is. */
6360 	preq->mac_header.offset = 0;
6361 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6362 	remain -= frm - (uint8_t *)wh;
6363 
6364 	/* Fill in 2GHz IEs and tell firmware where they are. */
6365 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6366 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6367 		if (remain < 4 + rs->rs_nrates)
6368 			return ENOBUFS;
6369 	} else if (remain < 2 + rs->rs_nrates)
6370 		return ENOBUFS;
6371 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6372 	pos = frm;
6373 	frm = ieee80211_add_rates(frm, rs);
6374 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6375 		frm = ieee80211_add_xrates(frm, rs);
6376 	remain -= frm - pos;
6377 
6378 	if (isset(sc->sc_enabled_capa,
6379 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6380 		if (remain < 3)
6381 			return ENOBUFS;
6382 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6383 		*frm++ = 1;
6384 		*frm++ = 0;
6385 		remain -= 3;
6386 	}
6387 	preq->band_data[0].len = htole16(frm - pos);
6388 
6389 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6390 		/* Fill in 5GHz IEs. */
6391 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6392 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6393 			if (remain < 4 + rs->rs_nrates)
6394 				return ENOBUFS;
6395 		} else if (remain < 2 + rs->rs_nrates)
6396 			return ENOBUFS;
6397 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6398 		pos = frm;
6399 		frm = ieee80211_add_rates(frm, rs);
6400 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6401 			frm = ieee80211_add_xrates(frm, rs);
6402 		preq->band_data[1].len = htole16(frm - pos);
6403 		remain -= frm - pos;
6404 		if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6405 			if (remain < 14)
6406 				return ENOBUFS;
6407 			frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6408 			remain -= frm - pos;
6409 			preq->band_data[1].len = htole16(frm - pos);
6410 		}
6411 	}
6412 
6413 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6414 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6415 	pos = frm;
6416 	if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6417 		if (remain < 28)
6418 			return ENOBUFS;
6419 		frm = ieee80211_add_htcap(frm, vap->iv_bss);
6420 		/* XXX add WME info? */
6421 		remain -= frm - pos;
6422 	}
6423 
6424 	preq->common_data.len = htole16(frm - pos);
6425 
6426 	return 0;
6427 }
6428 
6429 static int
6430 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6431 {
6432 	struct iwx_scan_config scan_cfg;
6433 	struct iwx_host_cmd hcmd = {
6434 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6435 		.len[0] = sizeof(scan_cfg),
6436 		.data[0] = &scan_cfg,
6437 		.flags = 0,
6438 	};
6439 	int cmdver;
6440 
6441 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6442 		printf("%s: firmware does not support reduced scan config\n",
6443 		    DEVNAME(sc));
6444 		return ENOTSUP;
6445 	}
6446 
6447 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6448 
6449 	/*
6450 	 * SCAN_CFG version >= 5 implies that the broadcast
6451 	 * STA ID field is deprecated.
6452 	 */
6453 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6454 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6455 		scan_cfg.bcast_sta_id = 0xff;
6456 
6457 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6458 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6459 
6460 	return iwx_send_cmd(sc, &hcmd);
6461 }
6462 
6463 static uint16_t
6464 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6465 {
6466 	struct ieee80211com *ic = &sc->sc_ic;
6467 	struct ieee80211_scan_state *ss = ic->ic_scan;
6468 	uint16_t flags = 0;
6469 
6470 	if (ss->ss_nssid == 0) {
6471 		DPRINTF(("%s: Passive scan started\n", __func__));
6472 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6473 	}
6474 
6475 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6476 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6477 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6478 
6479 	return flags;
6480 }
6481 
6482 #define IWX_SCAN_DWELL_ACTIVE		10
6483 #define IWX_SCAN_DWELL_PASSIVE		110
6484 
6485 /* adaptive dwell max budget time [TU] for full scan */
6486 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6487 /* adaptive dwell max budget time [TU] for directed scan */
6488 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6489 /* adaptive dwell default high band APs number */
6490 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6491 /* adaptive dwell default low band APs number */
6492 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6493 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6494 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6495 /* adaptive dwell number of APs override for p2p friendly GO channels */
6496 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6497 /* adaptive dwell number of APs override for social channels */
6498 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6499 
6500 static void
6501 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6502     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6503 {
6504 	uint32_t suspend_time, max_out_time;
6505 	uint8_t active_dwell, passive_dwell;
6506 
6507 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6508 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6509 
6510 	general_params->adwell_default_social_chn =
6511 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6512 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6513 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6514 
6515 	if (bgscan)
6516 		general_params->adwell_max_budget =
6517 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6518 	else
6519 		general_params->adwell_max_budget =
6520 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6521 
6522 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6523 	if (bgscan) {
6524 		max_out_time = htole32(120);
6525 		suspend_time = htole32(120);
6526 	} else {
6527 		max_out_time = htole32(0);
6528 		suspend_time = htole32(0);
6529 	}
6530 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6531 		htole32(max_out_time);
6532 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6533 		htole32(suspend_time);
6534 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6535 		htole32(max_out_time);
6536 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6537 		htole32(suspend_time);
6538 
6539 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6540 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6541 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6542 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6543 }
6544 
/*
 * Populate the "general params" section of a v10 UMAC scan request:
 * dwell settings, the general flags word, fragment counts for
 * fragmented LMAC scans, and the MAC id used to start the scan.
 */
static void
iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
    struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
{
	iwx_scan_umac_dwell_v10(sc, gp, bgscan);

	gp->flags = htole16(gen_flags);

	/* Fragmented scans split off-channel time into 3 fragments. */
	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;

	gp->scan_start_mac_id = 0;
}
6560 
6561 static void
6562 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6563     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6564     int n_ssid)
6565 {
6566 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6567 
6568 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6569 	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6570 
6571 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6572 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6573 }
6574 
/*
 * Build and submit a version 14 UMAC scan request.  The request lives
 * in sc->sc_umac_v14_cmd and is filled section by section: general
 * (flags/dwell), periodic schedule, probe request template, direct-scan
 * SSIDs, and channel configuration.  Background scans (bgscan != 0)
 * are submitted asynchronously.
 * Returns 0 on success or an errno on failure.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* One-shot scan: a single iteration with no repeat interval. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/*
	 * Copy the SSIDs to probe for into the request and build the
	 * direct-scan SSID bitmap referenced by the channel config.
	 */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6637 
6638 static void
6639 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6640 {
6641 	char alpha2[3];
6642 
6643 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6644 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6645 
6646 	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6647 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6648 
6649 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6650 }
6651 
6652 uint8_t
6653 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6654 {
6655 	int i;
6656 	uint8_t rval;
6657 
6658 	for (i = 0; i < rs->rs_nrates; i++) {
6659 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6660 		if (rval == iwx_rates[ridx].rate)
6661 			return rs->rs_rates[i];
6662 	}
6663 
6664 	return 0;
6665 }
6666 
6667 static int
6668 iwx_rval2ridx(int rval)
6669 {
6670 	int ridx;
6671 
6672 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6673 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6674 			continue;
6675 		if (rval == iwx_rates[ridx].rate)
6676 			break;
6677 	}
6678 
6679        return ridx;
6680 }
6681 
/*
 * Compute the CCK and OFDM ACK/CTS rate bitmaps for the MAC context
 * command from the node's negotiated rate set.  Starts with the basic
 * rates and then adds the mandatory rates below them, as required by
 * 802.11-2007 9.6 (see the long comment below).
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* Collect basic CCK rates; only meaningful on 2GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Collect basic OFDM rates, tracking the lowest one present. */
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6767 
6768 static void
6769 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6770     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6771 {
6772 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6773 	struct ieee80211com *ic = &sc->sc_ic;
6774 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6775 	struct ieee80211_node *ni = vap->iv_bss;
6776 	int cck_ack_rates, ofdm_ack_rates;
6777 
6778 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6779 	    in->in_color));
6780 	cmd->action = htole32(action);
6781 
6782 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
6783 		return;
6784 
6785 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6786 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6787 	else if (ic->ic_opmode == IEEE80211_M_STA)
6788 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6789 	else
6790 		panic("unsupported operating mode %d", ic->ic_opmode);
6791 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
6792 
6793 	IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6794 	DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6795 	    ether_sprintf(cmd->node_addr)));
6796 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6797 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6798 		return;
6799 	}
6800 
6801 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6802 	DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6803 	    ether_sprintf(cmd->bssid_addr)));
6804 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6805 	cmd->cck_rates = htole32(cck_ack_rates);
6806 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6807 
6808 	cmd->cck_short_preamble
6809 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6810 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6811 	cmd->short_slot
6812 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6813 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
6814 
6815 	struct chanAccParams chp;
6816 	ieee80211_wme_vap_getparams(vap, &chp);
6817 
6818 	for (int i = 0; i < WME_NUM_AC; i++) {
6819 		int txf = iwx_ac_to_tx_fifo[i];
6820 		cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6821 		cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6822 		cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6823 		cmd->ac[txf].fifos_mask = (1 << txf);
6824 		cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6825 
6826 		cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6827 	}
6828 
6829 	if (ni->ni_flags & IEEE80211_NODE_QOS) {
6830 		DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6831 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6832 	}
6833 
6834 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6835 		switch (vap->iv_curhtprotmode) {
6836 		case IEEE80211_HTINFO_OPMODE_PURE:
6837 			break;
6838 		case IEEE80211_HTINFO_OPMODE_PROTOPT:
6839 		case IEEE80211_HTINFO_OPMODE_MIXED:
6840 			cmd->protection_flags |=
6841 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6842 			    IWX_MAC_PROT_FLG_FAT_PROT);
6843 			break;
6844 		case IEEE80211_HTINFO_OPMODE_HT20PR:
6845 			if (in->in_phyctxt &&
6846 			    (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6847 			    in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6848 				cmd->protection_flags |=
6849 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6850 				    IWX_MAC_PROT_FLG_FAT_PROT);
6851 			}
6852 			break;
6853 		default:
6854 			break;
6855 		}
6856 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6857 		DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6858 	}
6859 
6860 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6861 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6862 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6863 #undef IWX_EXP2
6864 }
6865 
6866 static void
6867 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6868     struct iwx_mac_data_sta *sta, int assoc)
6869 {
6870 	struct ieee80211_node *ni = &in->in_ni;
6871 	struct ieee80211com *ic = &sc->sc_ic;
6872 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6873 	uint32_t dtim_off;
6874 	uint64_t tsf;
6875 	int dtim_period;
6876 
6877 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6878 	tsf = le64toh(ni->ni_tstamp.tsf);
6879 	dtim_period = vap->iv_dtim_period;
6880 
6881 	sta->is_assoc = htole32(assoc);
6882 
6883 	if (assoc) {
6884 		sta->dtim_time = htole32(tsf + dtim_off);
6885 		sta->dtim_tsf = htole64(tsf + dtim_off);
6886 		// XXX: unset in iwm
6887 		sta->assoc_beacon_arrive_time = 0;
6888 	}
6889 	sta->bi = htole32(ni->ni_intval);
6890 	sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6891 	sta->data_policy = htole32(0);
6892 	sta->listen_interval = htole32(10);
6893 	sta->assoc_id = htole32(ni->ni_associd);
6894 }
6895 
6896 static int
6897 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6898     int assoc)
6899 {
6900 	struct ieee80211com *ic = &sc->sc_ic;
6901 	struct ieee80211_node *ni = &in->in_ni;
6902 	struct iwx_mac_ctx_cmd cmd;
6903 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6904 
6905 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6906 		panic("MAC already added");
6907 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6908 		panic("MAC already removed");
6909 
6910 	memset(&cmd, 0, sizeof(cmd));
6911 
6912 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6913 
6914 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6915 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6916 		    sizeof(cmd), &cmd);
6917 	}
6918 
6919 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6920 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6921 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6922 		    IWX_MAC_FILTER_ACCEPT_GRP |
6923 		    IWX_MAC_FILTER_IN_BEACON |
6924 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
6925 		    IWX_MAC_FILTER_IN_CRC32);
6926 	// XXX: dtim period is in vap
6927 	} else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
6928 		/*
6929 		 * Allow beacons to pass through as long as we are not
6930 		 * associated or we do not have dtim period information.
6931 		 */
6932 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6933 	}
6934 	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6935 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6936 }
6937 
6938 static int
6939 iwx_clear_statistics(struct iwx_softc *sc)
6940 {
6941 	struct iwx_statistics_cmd scmd = {
6942 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6943 	};
6944 	struct iwx_host_cmd cmd = {
6945 		.id = IWX_STATISTICS_CMD,
6946 		.len[0] = sizeof(scmd),
6947 		.data[0] = &scmd,
6948 		.flags = IWX_CMD_WANT_RESP,
6949 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6950 	};
6951 	int err;
6952 
6953 	err = iwx_send_cmd(sc, &cmd);
6954 	if (err)
6955 		return err;
6956 
6957 	iwx_free_resp(sc, &cmd);
6958 	return 0;
6959 }
6960 
/*
 * Start a foreground (full) UMAC scan.
 * Returns 0 on success or an errno on failure.
 */
static int
iwx_scan(struct iwx_softc *sc)
{
	int err = iwx_umac_scan_v14(sc, 0);

	if (err)
		printf("%s: could not initiate scan\n", DEVNAME(sc));
	return err;
}
6973 
6974 static int
6975 iwx_bgscan(struct ieee80211com *ic)
6976 {
6977 	struct iwx_softc *sc = ic->ic_softc;
6978 	int err;
6979 
6980 	err = iwx_umac_scan_v14(sc, 1);
6981 	if (err) {
6982 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6983 		return err;
6984 	}
6985 	return 0;
6986 }
6987 
6988 static int
6989 iwx_enable_mgmt_queue(struct iwx_softc *sc)
6990 {
6991 	int err;
6992 
6993 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
6994 
6995 	/*
6996 	 * Non-QoS frames use the "MGMT" TID and queue.
6997 	 * Other TIDs and data queues are reserved for QoS data frames.
6998 	 */
6999 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7000 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
7001 	if (err) {
7002 		printf("%s: could not enable Tx queue %d (error %d)\n",
7003 		    DEVNAME(sc), sc->first_data_qid, err);
7004 		return err;
7005 	}
7006 
7007 	return 0;
7008 }
7009 
7010 static int
7011 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7012 {
7013 	int err, cmd_ver;
7014 
7015 	/* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7016 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7017 	    IWX_SCD_QUEUE_CONFIG_CMD);
7018 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7019 		return 0;
7020 
7021 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7022 
7023 	err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7024 	    IWX_MGMT_TID);
7025 	if (err) {
7026 		printf("%s: could not disable Tx queue %d (error %d)\n",
7027 		    DEVNAME(sc), sc->first_data_qid, err);
7028 		return err;
7029 	}
7030 
7031 	return 0;
7032 }
7033 
7034 static int
7035 iwx_rs_rval2idx(uint8_t rval)
7036 {
7037 	/* Firmware expects indices which match our 11g rate set. */
7038 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7039 	int i;
7040 
7041 	for (i = 0; i < rs->rs_nrates; i++) {
7042 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7043 			return i;
7044 	}
7045 
7046 	return -1;
7047 }
7048 
7049 static uint16_t
7050 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7051 {
7052 	uint16_t htrates = 0;
7053 	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7054 	int i;
7055 
7056 	if (rsidx == IEEE80211_HT_RATESET_SISO) {
7057 		for (i = 0; i < htrs->rs_nrates; i++) {
7058 			if (htrs->rs_rates[i] <= 7)
7059 				htrates |= (1 << htrs->rs_rates[i]);
7060 		}
7061 	} else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7062 		for (i = 0; i < htrs->rs_nrates; i++) {
7063 			if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7064 				htrates |= (1 << (htrs->rs_rates[i] - 8));
7065 		}
7066 	} else
7067 		panic(("iwx_rs_ht_rates"));
7068 
7069 	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7070 	    "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7071 
7072 	return htrates;
7073 }
7074 
7075 uint16_t
7076 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7077 {
7078 	uint16_t rx_mcs;
7079 	int max_mcs = -1;
7080 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n)        (0x3 << (2*((n)-1)))
7081 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)       (2*((n)-1))
7082 	rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7083 	    IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7084 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7085 
7086 	switch (rx_mcs) {
7087 	case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7088 		break;
7089 	case IEEE80211_VHT_MCS_SUPPORT_0_7:
7090 		max_mcs = 7;
7091 		break;
7092 	case IEEE80211_VHT_MCS_SUPPORT_0_8:
7093 		max_mcs = 8;
7094 		break;
7095 	case IEEE80211_VHT_MCS_SUPPORT_0_9:
7096 		/* Disable VHT MCS 9 for 20MHz-only stations. */
7097 		if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7098 			max_mcs = 8;
7099 		else
7100 			max_mcs = 9;
7101 		break;
7102 	default:
7103 		/* Should not happen; Values above cover the possible range. */
7104 		panic("invalid VHT Rx MCS value %u", rx_mcs);
7105 	}
7106 
7107 	return ((1 << (max_mcs + 1)) - 1);
7108 }
7109 
/*
 * Initialize firmware rate selection using version 3 of the
 * TLC_MNG_CONFIG command.
 *
 * NOTE(review): this path is currently compiled out via "#if 1" and
 * unconditionally panics, so firmware requiring the v3 command is
 * noticed immediately rather than misprogrammed.  The "#else" branch
 * below is the intended (untested) implementation, kept for reference.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the negotiated legacy rates into firmware indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the best supported mode and fill per-NSS MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Maximum channel width supported by the current PHY context. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU length: 3895 bytes for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short-GI support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7181 
/*
 * Initialize firmware rate selection using version 4 of the
 * TLC_MNG_CONFIG command: legacy rate bitmap, HT/VHT MCS bitmaps per
 * spatial stream, maximum channel width, chain mask, maximum A-MSDU
 * length and short-GI capabilities.
 * Returns EINVAL if a negotiated legacy rate cannot be mapped, else
 * the result of the (asynchronous) command submission.
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate negotiated legacy rates into firmware rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the best supported mode and fill per-NSS MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Maximum channel width, taken from the node's channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU length: 3895 bytes for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short-GI support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7280 
7281 static int
7282 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7283 {
7284 	int cmd_ver;
7285 
7286 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7287 	    IWX_TLC_MNG_CONFIG_CMD);
7288 	if (cmd_ver == 4)
7289 		return iwx_rs_init_v4(sc, in);
7290 	else
7291 		return iwx_rs_init_v3(sc, in);
7292 }
7293 
/*
 * Handle a TLC (rate selection) update notification from the firmware
 * and propagate the newly chosen Tx rate to the net80211 node.  The
 * encoding of the rate word depends on the notification version:
 * version >= 3 uses the "v2" rate_n_flags layout, older firmware the
 * "v1" layout.  HT rates are stored as an MCS; legacy rates are mapped
 * back to an index into the node's rate set.
 */
static void
iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = (void *)vap->iv_bss;

	struct ieee80211_rateset *rs = &ni->ni_rates;
	uint32_t rate_n_flags;
	uint8_t plcp, rval;
	int i, cmd_ver, rate_n_flags_ver2 = 0;

	/* Ignore notifications for other stations or without a rate. */
	if (notif->sta_id != IWX_STATION_ID ||
	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
		return;

	rate_n_flags = le32toh(notif->rate);

	if (sc->sc_debug & IWX_DEBUG_TXRATE)
		print_ratenflags(__func__, __LINE__,
		    rate_n_flags, sc->sc_rate_n_flags_version);

	/* Notification version >= 3 implies the v2 rate_n_flags layout. */
	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_TLC_MNG_UPDATE_NOTIF);
	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
		rate_n_flags_ver2 = 1;

	/* HT rates: record the MCS and return early. */
	if (rate_n_flags_ver2) {
		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
		if (mod_type == IWX_RATE_MCS_HT_MSK) {

			ieee80211_node_set_txrate_dot11rate(ni,
				IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
				IEEE80211_RATE_MCS);
			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
			    rate_n_flags);
			return;
		}
	} else {
		if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
			ieee80211_node_set_txrate_dot11rate(ni,
			    rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));

			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS idx: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
			return;
		}
	}

	/* Legacy rates: recover the rate value (in 500 kbps units). */
	if (rate_n_flags_ver2) {
		const struct ieee80211_rateset *rs;
		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
			rs = &ieee80211_std_rateset_11a;
		else
			rs = &ieee80211_std_rateset_11b;
		if (ridx < rs->rs_nrates)
			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
		else
			rval = 0;
	} else {
		/* v1 layout encodes the PLCP; look it up in iwx_rates. */
		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);

		rval = 0;
		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
			if (iwx_rates[i].plcp == plcp) {
				rval = iwx_rates[i].rate;
				break;
			}
		}
	}

	/* Map the rate value to an index in the node's own rate set. */
	if (rval) {
		uint8_t rv;
		for (i = 0; i < rs->rs_nrates; i++) {
			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
			if (rv == rval) {
				ieee80211_node_set_txrate_dot11rate(ni, i);
				break;
			}
		}
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
		    "%s:%d new rate %d\n", __func__, __LINE__,
		    ieee80211_node_get_txrate_dot11rate(ni));
	}
}
7386 
7387 static int
7388 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7389     uint8_t chains_static, uint8_t chains_dynamic)
7390 {
7391 	struct iwx_rlc_config_cmd cmd;
7392 	uint32_t cmd_id;
7393 	uint8_t active_cnt, idle_cnt;
7394 
7395 	memset(&cmd, 0, sizeof(cmd));
7396 
7397 	idle_cnt = chains_static;
7398 	active_cnt = chains_dynamic;
7399 
7400 	cmd.phy_id = htole32(phyctxt->id);
7401 	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7402 	    IWX_PHY_RX_CHAIN_VALID_POS);
7403 	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7404 	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7405 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7406 
7407 	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7408 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7409 }
7410 
/*
 * Re-point an existing PHY context at a (possibly new) channel and
 * channel-width configuration.  When the firmware has the CDB binding
 * capability and the new channel is on a different band, the context
 * is removed and re-added instead of modified in place.
 *
 * Returns 0 on success or an errno from the firmware command path.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	/* A wildcard channel cannot be programmed into the firmware. */
	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		    return EIO;
	}

	/* CDB firmware + band change: remove and re-add the context. */
	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		/* Same band (or no CDB support): modify in place. */
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Record the settings now active in the firmware. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/*
	 * Firmware with RLC_CONFIG_CMD version 2 takes the chain settings
	 * via a separate RLC command.
	 */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7475 
/*
 * Program the firmware for the AUTH state: set up PHY context 0 for the
 * BSS channel, then add MAC context, binding, and station, enable the
 * management (or monitor-inject) queue, and finally "protect" the
 * session with a time event so the firmware stays on channel while
 * association completes.  On failure the steps are unwound in reverse,
 * but only if no device reset bumped sc_generation in the meantime.
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * NOTE(review): this node reference is not released anywhere in
	 * this function, including the early-return error paths — confirm
	 * where the matching ieee80211_free_node() happens.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	/* Monitor mode listens on ic_bsschan; station mode on the BSS channel. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	/* Add the MAC context before binding and station. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
 	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	/* Bind the MAC context to the PHY context. */
	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode only needs the injection queue; no session protection. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* Nine beacon intervals, or 900 TU when the interval is unknown. */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

	/*
	 * Error unwind: each step is undone only if the device was not
	 * reset (generation unchanged) since we started.
	 */
rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7580 
/*
 * Tear down firmware state when leaving AUTH/ASSOC/RUN: cancel session
 * protection, then remove station, binding, and MAC context in the
 * reverse order of iwx_auth(), and finally park the now-unused PHY
 * context on a default channel.  Each removal is gated on the matching
 * sc_flags bit so partial setups unwind cleanly.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	/* Cancel the time event protecting the association. */
	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s:  IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7632 
/*
 * Program the firmware for the RUN state after association succeeds:
 * widen the PHY context for HT/VHT if negotiated, push updated STA and
 * MAC contexts (now carrying the associd), configure smart-FIFO and
 * multicast filtering, enable power management, and start rate scaling.
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * HT peer: update the PHY context with the negotiated chain count
	 * and channel width (80 MHz only for a VHT80 channel).
	 */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
			sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Switch the smart FIFO to full-on now that we are associated. */
	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode does not transmit; no rate scaling needed. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7729 
/*
 * Undo RUN-state firmware setup when leaving RUN: flush the TX path,
 * tear down all RX block-ack sessions, drop the smart FIFO back to its
 * init setting, disable beacon filtering, and mark the station as
 * disassociated in the MAC context.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		/* Tear down this BA session (start=0). */
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7785 
7786 static struct ieee80211_node *
7787 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7788 {
7789 	return malloc(sizeof (struct iwx_node), M_80211_NODE,
7790 	    M_NOWAIT | M_ZERO);
7791 }
7792 
#if 0
/*
 * Disabled OpenBSD hardware-crypto key management (CCMP offload via
 * ADD_STA_KEY).  Not yet ported to the FreeBSD net80211 key API; kept
 * for reference.  Keys are queued to a task because firmware commands
 * cannot be issued from the caller's context.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	/* Queue full: caller must retry later. */
	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	/* EBUSY tells the stack the key install completes asynchronously. */
	return EBUSY;
}

/*
 * Install one CCMP key into the firmware for the given station.
 * On failure the association is torn down (deauth + back to SCAN).
 */
int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys live at key offset 1, pairwise at offset 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		/* Key install failed: deauth and restart scanning. */
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* Open the port only once both pairwise and group keys are set. */
	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}

/*
 * Task context: drain the queued keys, stopping on the first error
 * or on shutdown.
 */
void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}

/*
 * Remove a CCMP key from the firmware (fire-and-forget, async).
 */
void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
                ieee80211_delete_key(ic, ni, k);
		return;
	}

	/* No station in firmware: nothing to delete. */
	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
#endif
7945 
/*
 * Perform the firmware-side work for a net80211 state transition.
 * Runs with the net80211 lock dropped (see iwx_newstate); takes the
 * driver lock.  Downward transitions first unwind RUN and then
 * AUTH/ASSOC state so firmware contexts are removed in order.
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	/* Same-state or downward transition: unwind from the old state. */
	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			/* Deauth when dropping to AUTH or below (incl. AUTH->AUTH). */
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/* Only AUTH and RUN require firmware programming on the way up. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
			break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8011 
8012 static int
8013 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
8014 {
8015 	struct iwx_vap *ivp = IWX_VAP(vap);
8016 	struct ieee80211com *ic = vap->iv_ic;
8017 	enum ieee80211_state ostate = vap->iv_state;
8018 	int err;
8019 
8020 	/*
8021 	 * Prevent attempts to transition towards the same state, unless
8022 	 * we are scanning in which case a SCAN -> SCAN transition
8023 	 * triggers another scan iteration. And AUTH -> AUTH is needed
8024 	 * to support band-steering.
8025 	 */
8026 	if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
8027 	    nstate != IEEE80211_S_AUTH)
8028 		return 0;
8029 	IEEE80211_UNLOCK(ic);
8030 	err = iwx_newstate_sub(vap, nstate);
8031 	IEEE80211_LOCK(ic);
8032 	if (err == 0)
8033 		err = ivp->iv_newstate(vap, nstate, arg);
8034 
8035 	return (err);
8036 }
8037 
8038 static void
8039 iwx_endscan(struct iwx_softc *sc)
8040 {
8041         struct ieee80211com *ic = &sc->sc_ic;
8042         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8043 
8044         if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8045                 return;
8046 
8047         sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8048 
8049         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8050         wakeup(&vap->iv_state); /* wake up iwx_newstate */
8051 }
8052 
8053 /*
8054  * Aging and idle timeouts for the different possible scenarios
8055  * in default configuration
8056  */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block-ack */
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* TX re-attempt */
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
8080 
8081 /*
8082  * Aging and idle timeouts for the different possible scenarios
8083  * in single BSS MAC configuration.
8084  */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{	/* block-ack */
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{	/* TX re-attempt */
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};
8108 
8109 static void
8110 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8111     struct ieee80211_node *ni)
8112 {
8113 	int i, j, watermark;
8114 
8115 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8116 
8117 	/*
8118 	 * If we are in association flow - check antenna configuration
8119 	 * capabilities of the AP station, and choose the watermark accordingly.
8120 	 */
8121 	if (ni) {
8122 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8123 			struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8124 			int hasmimo = 0;
8125 			for (i = 0; i < htrs->rs_nrates; i++) {
8126 				if (htrs->rs_rates[i] > 7) {
8127 					hasmimo = 1;
8128 					break;
8129 				}
8130 			}
8131 			if (hasmimo)
8132 				watermark = IWX_SF_W_MARK_MIMO2;
8133 			else
8134 				watermark = IWX_SF_W_MARK_SISO;
8135 		} else {
8136 			watermark = IWX_SF_W_MARK_LEGACY;
8137 		}
8138 	/* default watermark value for unassociated mode. */
8139 	} else {
8140 		watermark = IWX_SF_W_MARK_MIMO2;
8141 	}
8142 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8143 
8144 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8145 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8146 			sf_cmd->long_delay_timeouts[i][j] =
8147 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8148 		}
8149 	}
8150 
8151 	if (ni) {
8152 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8153 		       sizeof(iwx_sf_full_timeout));
8154 	} else {
8155 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8156 		       sizeof(iwx_sf_full_timeout_def));
8157 	}
8158 
8159 }
8160 
8161 static int
8162 iwx_sf_config(struct iwx_softc *sc, int new_state)
8163 {
8164 	struct ieee80211com *ic = &sc->sc_ic;
8165 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8166 	struct ieee80211_node *ni = vap->iv_bss;
8167 	struct iwx_sf_cfg_cmd sf_cmd = {
8168 		.state = htole32(new_state),
8169 	};
8170 	int err = 0;
8171 
8172 	switch (new_state) {
8173 	case IWX_SF_UNINIT:
8174 	case IWX_SF_INIT_OFF:
8175 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
8176 		break;
8177 	case IWX_SF_FULL_ON:
8178 		iwx_fill_sf_command(sc, &sf_cmd, ni);
8179 		break;
8180 	default:
8181 		return EINVAL;
8182 	}
8183 
8184 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8185 				   sizeof(sf_cmd), &sf_cmd);
8186 	return err;
8187 }
8188 
8189 static int
8190 iwx_send_bt_init_conf(struct iwx_softc *sc)
8191 {
8192 	struct iwx_bt_coex_cmd bt_cmd;
8193 
8194 	bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8195 
8196 	bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8197 	bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8198 	bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8199 
8200 
8201 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8202 	    &bt_cmd);
8203 }
8204 
/*
 * Tell the firmware about SoC integration properties: discrete vs.
 * integrated, LTR apply-delay, crystal latency, and (for newer scan
 * firmware) whether a low-latency crystal is fitted.
 */
static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/* LOW_LATENCY is only understood by scan cmd version >= 2. */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
8243 
8244 static int
8245 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8246 {
8247 	struct iwx_mcc_update_cmd mcc_cmd;
8248 	struct iwx_host_cmd hcmd = {
8249 		.id = IWX_MCC_UPDATE_CMD,
8250 		.flags = IWX_CMD_WANT_RESP,
8251 		.data = { &mcc_cmd },
8252 	};
8253 	struct iwx_rx_packet *pkt;
8254 	struct iwx_mcc_update_resp *resp;
8255 	size_t resp_len;
8256 	int err;
8257 
8258 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8259 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8260 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8261 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8262 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8263 	else
8264 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8265 
8266 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8267 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8268 
8269 	err = iwx_send_cmd(sc, &hcmd);
8270 	if (err)
8271 		return err;
8272 
8273 	pkt = hcmd.resp_pkt;
8274 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8275 		err = EIO;
8276 		goto out;
8277 	}
8278 
8279 	resp_len = iwx_rx_packet_payload_len(pkt);
8280 	if (resp_len < sizeof(*resp)) {
8281 		err = EIO;
8282 		goto out;
8283 	}
8284 
8285 	resp = (void *)pkt->data;
8286 	if (resp_len != sizeof(*resp) +
8287 	    resp->n_channels * sizeof(resp->channels[0])) {
8288 		err = EIO;
8289 		goto out;
8290 	}
8291 
8292 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
8293 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8294 
8295 out:
8296 	iwx_free_resp(sc, &hcmd);
8297 
8298 	return err;
8299 }
8300 
8301 static int
8302 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8303 {
8304 	struct iwx_temp_report_ths_cmd cmd;
8305 	int err;
8306 
8307 	/*
8308 	 * In order to give responsibility for critical-temperature-kill
8309 	 * and TX backoff to FW we need to send an empty temperature
8310 	 * reporting command at init time.
8311 	 */
8312 	memset(&cmd, 0, sizeof(cmd));
8313 
8314 	err = iwx_send_cmd_pdu(sc,
8315 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8316 	    0, sizeof(cmd), &cmd);
8317 	if (err)
8318 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8319 		    DEVNAME(sc), err);
8320 
8321 	return err;
8322 }
8323 
8324 static int
8325 iwx_init_hw(struct iwx_softc *sc)
8326 {
8327 	struct ieee80211com *ic = &sc->sc_ic;
8328 	int err = 0, i;
8329 
8330 	err = iwx_run_init_mvm_ucode(sc, 0);
8331 	if (err)
8332 		return err;
8333 
8334 	if (!iwx_nic_lock(sc))
8335 		return EBUSY;
8336 
8337 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8338 	if (err) {
8339 		printf("%s: could not init tx ant config (error %d)\n",
8340 		    DEVNAME(sc), err);
8341 		goto err;
8342 	}
8343 
8344 	if (sc->sc_tx_with_siso_diversity) {
8345 		err = iwx_send_phy_cfg_cmd(sc);
8346 		if (err) {
8347 			printf("%s: could not send phy config (error %d)\n",
8348 			    DEVNAME(sc), err);
8349 			goto err;
8350 		}
8351 	}
8352 
8353 	err = iwx_send_bt_init_conf(sc);
8354 	if (err) {
8355 		printf("%s: could not init bt coex (error %d)\n",
8356 		    DEVNAME(sc), err);
8357 		return err;
8358 	}
8359 
8360 	err = iwx_send_soc_conf(sc);
8361 	if (err) {
8362 		printf("%s: iwx_send_soc_conf failed\n", __func__);
8363 		return err;
8364 	}
8365 
8366 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8367 		printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8368 		err = iwx_send_dqa_cmd(sc);
8369 		if (err) {
8370 			printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8371 			    "failed (error %d)\n", __func__, err);
8372 			return err;
8373 		}
8374 	}
8375 	// TODO phyctxt
8376 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8377 		/*
8378 		 * The channel used here isn't relevant as it's
8379 		 * going to be overwritten in the other flows.
8380 		 * For now use the first channel we have.
8381 		 */
8382 		sc->sc_phyctxt[i].id = i;
8383 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8384 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8385 		    IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8386 		if (err) {
8387 			printf("%s: could not add phy context %d (error %d)\n",
8388 			    DEVNAME(sc), i, err);
8389 			goto err;
8390 		}
8391 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8392 		    IWX_RLC_CONFIG_CMD) == 2) {
8393 			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8394 			if (err) {
8395 				printf("%s: could not configure RLC for PHY "
8396 				    "%d (error %d)\n", DEVNAME(sc), i, err);
8397 				goto err;
8398 			}
8399 		}
8400 	}
8401 
8402 	err = iwx_config_ltr(sc);
8403 	if (err) {
8404 		printf("%s: PCIe LTR configuration failed (error %d)\n",
8405 		    DEVNAME(sc), err);
8406 	}
8407 
8408 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8409 		err = iwx_send_temp_report_ths_cmd(sc);
8410 		if (err) {
8411 			printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8412 			    __func__);
8413 			goto err;
8414 		}
8415 	}
8416 
8417 	err = iwx_power_update_device(sc);
8418 	if (err) {
8419 		printf("%s: could not send power command (error %d)\n",
8420 		    DEVNAME(sc), err);
8421 		goto err;
8422 	}
8423 
8424 	if (sc->sc_nvm.lar_enabled) {
8425 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
8426 		if (err) {
8427 			printf("%s: could not init LAR (error %d)\n",
8428 			    DEVNAME(sc), err);
8429 			goto err;
8430 		}
8431 	}
8432 
8433 	err = iwx_config_umac_scan_reduced(sc);
8434 	if (err) {
8435 		printf("%s: could not configure scan (error %d)\n",
8436 		    DEVNAME(sc), err);
8437 		goto err;
8438 	}
8439 
8440 	err = iwx_disable_beacon_filter(sc);
8441 	if (err) {
8442 		printf("%s: could not disable beacon filter (error %d)\n",
8443 		    DEVNAME(sc), err);
8444 		goto err;
8445 	}
8446 
8447 err:
8448 	iwx_nic_unlock(sc);
8449 	return err;
8450 }
8451 
8452 /* Allow multicast from our BSSID. */
8453 static int
8454 iwx_allow_mcast(struct iwx_softc *sc)
8455 {
8456 	struct ieee80211com *ic = &sc->sc_ic;
8457 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8458 	struct iwx_node *in = IWX_NODE(vap->iv_bss);
8459 	struct iwx_mcast_filter_cmd *cmd;
8460 	size_t size;
8461 	int err;
8462 
8463 	size = roundup(sizeof(*cmd), 4);
8464 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8465 	if (cmd == NULL)
8466 		return ENOMEM;
8467 	cmd->filter_own = 1;
8468 	cmd->port_id = 0;
8469 	cmd->count = 0;
8470 	cmd->pass_all = 1;
8471 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8472 
8473 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8474 	    0, size, cmd);
8475 	free(cmd, M_DEVBUF);
8476 	return err;
8477 }
8478 
8479 static int
8480 iwx_init(struct iwx_softc *sc)
8481 {
8482 	int err, generation;
8483 	generation = ++sc->sc_generation;
8484 	iwx_preinit(sc);
8485 
8486 	err = iwx_start_hw(sc);
8487 	if (err) {
8488 		printf("%s: iwx_start_hw failed\n", __func__);
8489 		return err;
8490 	}
8491 
8492 	err = iwx_init_hw(sc);
8493 	if (err) {
8494 		if (generation == sc->sc_generation)
8495 			iwx_stop_device(sc);
8496 		printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
8497 		return err;
8498 	}
8499 
8500 	sc->sc_flags |= IWX_FLAG_HW_INITED;
8501 	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8502 
8503 	return 0;
8504 }
8505 
8506 static void
8507 iwx_start(struct iwx_softc *sc)
8508 {
8509         struct ieee80211_node *ni;
8510         struct mbuf *m;
8511 
8512         while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8513                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8514                 if (iwx_tx(sc, m, ni) != 0) {
8515                       if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8516                         continue;
8517                 }
8518         }
8519 }
8520 
8521 static void
8522 iwx_stop(struct iwx_softc *sc)
8523 {
8524 	struct ieee80211com *ic = &sc->sc_ic;
8525 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8526 	struct iwx_vap *ivp = IWX_VAP(vap);
8527 
8528 	iwx_stop_device(sc);
8529 
8530 	/* Reset soft state. */
8531 	sc->sc_generation++;
8532 	ivp->phy_ctxt = NULL;
8533 
8534 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8535 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8536 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8537 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8538 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8539 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8540 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8541 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8542 
8543 	sc->sc_rx_ba_sessions = 0;
8544 	sc->ba_rx.start_tidmask = 0;
8545 	sc->ba_rx.stop_tidmask = 0;
8546 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
8547 	sc->ba_tx.start_tidmask = 0;
8548 	sc->ba_tx.stop_tidmask = 0;
8549 }
8550 
8551 static void
8552 iwx_watchdog(void *arg)
8553 {
8554 	struct iwx_softc *sc = arg;
8555 	struct ieee80211com *ic = &sc->sc_ic;
8556 	int i;
8557 
8558 	/*
8559 	 * We maintain a separate timer for each Tx queue because
8560 	 * Tx aggregation queues can get "stuck" while other queues
8561 	 * keep working. The Linux driver uses a similar workaround.
8562 	 */
8563 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8564 		if (sc->sc_tx_timer[i] > 0) {
8565 			if (--sc->sc_tx_timer[i] == 0) {
8566 				printf("%s: device timeout\n", DEVNAME(sc));
8567 
8568 				if (sc->sc_debug)
8569 					iwx_bbl_print_log();
8570 
8571 				iwx_nic_error(sc);
8572 				iwx_dump_driver_status(sc);
8573 				ieee80211_restart_all(ic);
8574 				return;
8575 			}
8576 		}
8577 	}
8578 	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8579 }
8580 
8581 /*
8582  * Note: This structure is read from the device with IO accesses,
8583  * and the reading already does the endian conversion. As it is
8584  * read with uint32_t-sized accesses, any members with a different size
8585  * need to be ordered correctly though!
8586  */
8587 struct iwx_error_event_table {
8588 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8589 	uint32_t error_id;		/* type of error */
8590 	uint32_t trm_hw_status0;	/* TRM HW status */
8591 	uint32_t trm_hw_status1;	/* TRM HW status */
8592 	uint32_t blink2;		/* branch link */
8593 	uint32_t ilink1;		/* interrupt link */
8594 	uint32_t ilink2;		/* interrupt link */
8595 	uint32_t data1;		/* error-specific data */
8596 	uint32_t data2;		/* error-specific data */
8597 	uint32_t data3;		/* error-specific data */
8598 	uint32_t bcon_time;		/* beacon timer */
8599 	uint32_t tsf_low;		/* network timestamp function timer */
8600 	uint32_t tsf_hi;		/* network timestamp function timer */
8601 	uint32_t gp1;		/* GP1 timer register */
8602 	uint32_t gp2;		/* GP2 timer register */
8603 	uint32_t fw_rev_type;	/* firmware revision type */
8604 	uint32_t major;		/* uCode version major */
8605 	uint32_t minor;		/* uCode version minor */
8606 	uint32_t hw_ver;		/* HW Silicon version */
8607 	uint32_t brd_ver;		/* HW board version */
8608 	uint32_t log_pc;		/* log program counter */
8609 	uint32_t frame_ptr;		/* frame pointer */
8610 	uint32_t stack_ptr;		/* stack pointer */
8611 	uint32_t hcmd;		/* last host command header */
8612 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8613 				 * rxtx_flag */
8614 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8615 				 * host_flag */
8616 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8617 				 * enc_flag */
8618 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8619 				 * time_flag */
8620 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8621 				 * wico interrupt */
8622 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8623 	uint32_t wait_event;		/* wait event() caller address */
8624 	uint32_t l2p_control;	/* L2pControlField */
8625 	uint32_t l2p_duration;	/* L2pDurationField */
8626 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8627 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8628 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8629 				 * (LMPM_PMG_SEL) */
8630 	uint32_t u_timestamp;	/* indicate when the date and time of the
8631 				 * compilation */
8632 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8633 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8634 
8635 /*
8636  * UMAC error struct - relevant starting from family 8000 chip.
8637  * Note: This structure is read from the device with IO accesses,
8638  * and the reading already does the endian conversion. As it is
8639  * read with u32-sized accesses, any members with a different size
8640  * need to be ordered correctly though!
8641  */
8642 struct iwx_umac_error_event_table {
8643 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8644 	uint32_t error_id;	/* type of error */
8645 	uint32_t blink1;	/* branch link */
8646 	uint32_t blink2;	/* branch link */
8647 	uint32_t ilink1;	/* interrupt link */
8648 	uint32_t ilink2;	/* interrupt link */
8649 	uint32_t data1;		/* error-specific data */
8650 	uint32_t data2;		/* error-specific data */
8651 	uint32_t data3;		/* error-specific data */
8652 	uint32_t umac_major;
8653 	uint32_t umac_minor;
8654 	uint32_t frame_pointer;	/* core register 27*/
8655 	uint32_t stack_pointer;	/* core register 28 */
8656 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8657 	uint32_t nic_isr_pref;	/* ISR status register */
8658 } __packed;
8659 
/* Layout constants for sanity-checking the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8662 
/*
 * Dump the UMAC error event table from device memory to the console.
 * Called from iwx_nic_error() when the firmware reported a UMAC error
 * table address in its ALIVE response.
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Addresses below 0x400000 cannot hold a valid error table. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8707 
/* Upper nibble of the assert code identifies the asserting CPU. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Firmware assert codes and their symbolic names.  The terminating
 * "ADVANCED_SYSASSERT" entry is the catch-all returned by
 * iwx_desc_lookup() when no other entry matches.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8734 
8735 static const char *
8736 iwx_desc_lookup(uint32_t num)
8737 {
8738 	int i;
8739 
8740 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8741 		if (advanced_lookup[i].num ==
8742 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8743 			return advanced_lookup[i].name;
8744 
8745 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8746 	return advanced_lookup[i].name;
8747 }
8748 
8749 /*
8750  * Support for dumping the error log seemed like a good idea ...
8751  * but it's mostly hex junk and the only sensible thing is the
8752  * hw/ucode revision (which we know anyway).  Since it's here,
8753  * I'll just leave it in, just in case e.g. the Intel guys want to
8754  * help us decipher some "ADVANCED_SYSASSERT" later.
8755  */
8756 static void
8757 iwx_nic_error(struct iwx_softc *sc)
8758 {
8759 	struct iwx_error_event_table table;
8760 	uint32_t base;
8761 
8762 	printf("%s: dumping device error log\n", DEVNAME(sc));
8763 	printf("%s: GOS-3758: 1\n", __func__);
8764 	base = sc->sc_uc.uc_lmac_error_event_table[0];
8765 	printf("%s: GOS-3758: 2\n", __func__);
8766 	if (base < 0x400000) {
8767 		printf("%s: Invalid error log pointer 0x%08x\n",
8768 		    DEVNAME(sc), base);
8769 		return;
8770 	}
8771 
8772 	printf("%s: GOS-3758: 3\n", __func__);
8773 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8774 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8775 		return;
8776 	}
8777 
8778 	printf("%s: GOS-3758: 4\n", __func__);
8779 	if (!table.valid) {
8780 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8781 		return;
8782 	}
8783 
8784 	printf("%s: GOS-3758: 5\n", __func__);
8785 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8786 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8787 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8788 		    sc->sc_flags, table.valid);
8789 	}
8790 
8791 	printf("%s: GOS-3758: 6\n", __func__);
8792 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8793 	    iwx_desc_lookup(table.error_id));
8794 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8795 	    table.trm_hw_status0);
8796 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8797 	    table.trm_hw_status1);
8798 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8799 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8800 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8801 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8802 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8803 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8804 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8805 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8806 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8807 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8808 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8809 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8810 	    table.fw_rev_type);
8811 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8812 	    table.major);
8813 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8814 	    table.minor);
8815 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8816 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8817 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8818 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8819 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8820 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8821 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8822 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8823 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8824 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8825 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8826 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8827 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8828 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8829 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8830 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8831 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8832 
8833 	if (sc->sc_uc.uc_umac_error_event_table)
8834 		iwx_nic_umac_error(sc);
8835 }
8836 
8837 static void
8838 iwx_dump_driver_status(struct iwx_softc *sc)
8839 {
8840 	struct ieee80211com *ic = &sc->sc_ic;
8841 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8842 	enum ieee80211_state state = vap->iv_state;
8843 	int i;
8844 
8845 	printf("driver status:\n");
8846 	for (i = 0; i < nitems(sc->txq); i++) {
8847 		struct iwx_tx_ring *ring = &sc->txq[i];
8848 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
8849 		    "cur_hw=%-3d queued=%-3d\n",
8850 		    i, ring->qid, ring->cur, ring->cur_hw,
8851 		    ring->queued);
8852 	}
8853 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
8854 	printf("  802.11 state %s\n", ieee80211_state_name[state]);
8855 }
8856 
/*
 * Sync the current Rx buffer for CPU reads and point _var_ at the
 * payload immediately following the packet header (_pkt_ + 1).
 * NOTE: silently relies on 'sc' and 'data' being in scope at the
 * call site (only used inside iwx_rx_pkt()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
8862 
8863 static int
8864 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8865 {
8866 	int qid, idx, code;
8867 
8868 	qid = pkt->hdr.qid & ~0x80;
8869 	idx = pkt->hdr.idx;
8870 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8871 
8872 	return (!(qid == 0 && idx == 0 && code == 0) &&
8873 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8874 }
8875 
/*
 * Process one Rx buffer, which may contain several consecutive firmware
 * packets (notifications, command responses, and received frames).
 * Frames are handed up to the 802.11 stack via iwx_rx_mpdu_mq();
 * direct command responses additionally complete the matching entry on
 * the command queue via iwx_cmd_done().
 *
 * Ownership note: when the last frame in the buffer is passed up the
 * stack, m0 is set to NULL so the final m_freem() below is skipped.
 * 'ml' is currently unused (only referenced by the commented-out
 * IWX_BAR_FRAME_RELEASE case); real packet data lives in data->m.
 */
static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	m0 = data->m;
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;
		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/* Stop at the first slot the hardware has not filled in. */
		if (!iwx_rx_pkt_valid(pkt))
			break;
#ifdef IWX_DEBUG
        iwx_bbl_add_entry(pkt->hdr.code, IWX_BBL_CMD_RX, ticks);
#endif
		/*
		 * XXX Intel inside (tm)
		 * Any commands in the LONG_GROUP could actually be in the
		 * LEGACY group. Firmware API versions >= 50 reject commands
		 * in group 0, forcing us to use this hack.
		 */
		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
			struct iwx_tx_ring *ring = &sc->txq[qid];
			struct iwx_tx_data *txdata = &ring->data[idx];
			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
				code = iwx_cmd_opcode(code);
		}

		/* Reject truncated or overlong packets. */
		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
			break;

		/*
		 * First MPDU in this buffer: put a fresh buffer onto the
		 * Rx ring in place of m0, so that m0 (and copies of it)
		 * can be handed to the network stack below.  The KASSERT
		 * confirms data->m was indeed replaced.
		 */
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				break;
			}
			KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			/* XXX-THJ: I've not managed to hit this path in testing */
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			/* AX210 devices ship only one packet per Rx buffer. */
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
			    nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
				if (m == NULL) {
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
			}
			break;
		}

//		case IWX_BAR_FRAME_RELEASE:
//			iwx_rx_bar_frame_release(sc, pkt, ml);
//			break;
//
		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_BA_NOTIF:
			iwx_rx_compressed_ba(sc, pkt);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			iwx_rx_bmiss(sc, pkt, data);
			DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
			    __func__));
			ieee80211_beacon_miss(ic);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;
			struct iwx_alive_resp_v5 *resp5;
			struct iwx_alive_resp_v6 *resp6;

			DPRINTF(("%s: firmware alive\n", __func__));
			sc->sc_uc.uc_ok = 0;

			/*
			 * For v5 and above, we can check the version, for older
			 * versions we need to check the size.
			 */
			 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 6) {
				SYNC_RESP_STRUCT(resp6, pkt);
				/* Unexpected payload size: wake waiter, uc_ok stays 0. */
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp6)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp6->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp6->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp6->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp6->sku_id.data[2]);
				if (resp6->status == IWX_ALIVE_STATUS_OK) {
					sc->sc_uc.uc_ok = 1;
				}
			 } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 5) {
				SYNC_RESP_STRUCT(resp5, pkt);
				/* Unexpected payload size: wake waiter, uc_ok stays 0. */
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp5)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp5->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp5->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp5->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp5->sku_id.data[2]);
				if (resp5->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else
				printf("unknown payload version");

			/* Wake whoever is sleeping on the ALIVE response. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			ieee80211_restart_all(ic);
			break;
		}

		/*
		 * Direct responses to driver-issued commands: copy the
		 * response packet for the waiting issuer, if one registered
		 * a buffer in sc_cmd_resp_pkt[idx].
		 */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_CMD):
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Failed or ill-sized response: drop the stashed buffer. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
			struct iwx_umac_scan_complete *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
			    notif->status));
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
			    __func__));
			struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
			    notif->status));
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Ignore notifications for time events we did not start. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_NOTIF): {
			struct iwx_session_prot_notif *notif;
			uint32_t status, start, conf_id;

			SYNC_RESP_STRUCT(notif, pkt);

			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
		    break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* XXX(review): (void) cast is redundant; notif is used below. */
			(void)notif;
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	/* Free m0 only if we replaced it on the ring and did not pass it up. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
9315 
/*
 * Interrupt-time Rx handler: walk the Rx ring from our current position
 * up to the hardware write pointer, dispatch each buffer through
 * iwx_rx_pkt(), then tell the firmware how far we have processed.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf m;	/* scratch; iwx_rx_pkt() reads frames from data->m */
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * Fetch the closed Rx-buffer index from the status area.  AX210
	 * and later report it as a bare 16-bit word; older devices use
	 * the closed_rb_num field of the status struct.
	 */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9348 
#if 0
/*
 * Legacy INTA/ICT interrupt handler, currently compiled out: iwx_attach()
 * requires MSI-X and installs iwx_intr_msix() instead.  Kept for
 * reference in case legacy interrupt support is ever needed.
 */
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	/* Mask all interrupts while we figure out what fired. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		/* ICT mode: causes are DMA'd into a table instead of CSR_INT. */
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the slot so the next pass sees fresh data. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		/* Hardware gone (all-ones) or garbage pattern: bail out. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		/* Nothing pending; re-enable interrupts and leave. */
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		/* Firmware reported a software error; dump state if debugging. */
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		/* Wake up the firmware-load path sleeping on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
#endif
9477 
/*
 * MSI-X interrupt handler (the driver allocates a single vector, see
 * iwx_attach()).  Reads and acknowledges the FH (DMA) and HW interrupt
 * cause registers, then dispatches: RX notifications, firmware chunk
 * load completion, fatal firmware/hardware errors, rfkill changes and
 * the firmware "alive" event.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;	/* only vector 0 is ever allocated */

	IWX_LOCK(sc);

	/* Read the pending causes and ack them by writing them back. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	/* Only consider causes we have enabled. */
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	/* Fatal firmware error: log diagnostics and restart the stack. */
	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_bbl_print_log();
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	/* Unrecoverable hardware error: mark it and shut the device down. */
	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
out:
	IWX_UNLOCK(sc);
	return;
}
9560 
9561 /*
9562  * The device info table below contains device-specific config overrides.
9563  * The most important parameter derived from this table is the name of the
9564  * firmware image to load.
9565  *
9566  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9567  * The "old" table matches devices based on PCI vendor/product IDs only.
9568  * The "new" table extends this with various device parameters derived
9569  * from MAC type, and RF type.
9570  *
9571  * In iwlwifi "old" and "new" tables share the same array, where "old"
9572  * entries contain dummy values for data defined only for "new" entries.
9573  * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9576  * entries from "old" to "new" have at times been reverted due to regressions.
9577  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9578  * devices in the same driver.
9579  *
9580  * Our table below contains mostly "new" entries declared in iwlwifi
9581  * with the _IWL_DEV_INFO() macro (with a leading underscore).
9582  * Other devices are matched based on PCI vendor/product ID as usual,
9583  * unless matching specific PCI subsystem vendor/product IDs is required.
9584  *
9585  * Some "old"-style entries are required to identify the firmware image to use.
9586  * Others might be used to print a specific marketing name into Linux dmesg,
9587  * but we can't be sure whether the corresponding devices would be matched
9588  * correctly in the absence of their entries. So we include them just in case.
9589  */
9590 
/*
 * Key for matching a probed device against iwx_dev_info_table[].
 * Any field may hold IWX_CFG_ANY (suitably truncated) to act as a
 * wildcard; see iwx_find_device_cfg() for the matching logic.
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID (compared to sc_pid) */
	uint16_t subdevice;	/* PCI subsystem device ID */
	uint16_t mac_type;	/* MAC type from CSR hw revision */
	uint16_t rf_type;	/* RF type from CSR hw RF ID */
	uint8_t mac_step;	/* MAC silicon step from CSR hw revision */
	uint8_t rf_id;		/* RF ID bits from the subsystem device ID */
	uint8_t no_160;		/* 160MHz flag (IWX_CFG_160/IWX_CFG_NO_160) */
	uint8_t cores;		/* core config bits from subsystem device ID */
	uint8_t cdb;		/* CDB flag from CSR hw RF ID */
	uint8_t jacket;		/* jacket flag from CSR hw RF ID */
	const struct iwx_device_cfg *cfg;	/* config to apply on match */
};
9604 
/*
 * Initializer for a fully-specified table entry, mirroring iwlwifi's
 * _IWL_DEV_INFO() macro; IWX_CFG_ANY in a field means "don't care".
 */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/* "Old"-style entry: match on PCI device/subsystem device ID only. */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9616 
9617 /*
9618  * When adding entries to this table keep in mind that entries must
9619  * be listed in the same order as in the Linux driver. Code walks this
9620  * table backwards and uses the first matching entry it finds.
9621  * Device firmware must be available in fw_update(8).
9622  */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/*
	 * IWX_DEV_INFO() entries match on PCI device/subdevice ID only;
	 * _IWX_DEV_INFO() entries additionally constrain MAC/RF type,
	 * silicon step and related parameters (IWX_CFG_ANY = wildcard).
	 * Searched from the bottom up by iwx_find_device_cfg().
	 */
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};
9921 
9922 static int
9923 iwx_preinit(struct iwx_softc *sc)
9924 {
9925 	struct ieee80211com *ic = &sc->sc_ic;
9926 	int err;
9927 
9928 	err = iwx_prepare_card_hw(sc);
9929 	if (err) {
9930 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9931 		return err;
9932 	}
9933 
9934 	if (sc->attached) {
9935 		return 0;
9936 	}
9937 
9938 	err = iwx_start_hw(sc);
9939 	if (err) {
9940 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9941 		return err;
9942 	}
9943 
9944 	err = iwx_run_init_mvm_ucode(sc, 1);
9945 	iwx_stop_device(sc);
9946 	if (err) {
9947 		printf("%s: failed to stop device\n", DEVNAME(sc));
9948 		return err;
9949 	}
9950 
9951 	/* Print version info and MAC address on first successful fw load. */
9952 	sc->attached = 1;
9953 	if (sc->sc_pnvm_ver) {
9954 		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
9955 		    "address %s\n",
9956 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9957 		    sc->sc_fwver, sc->sc_pnvm_ver,
9958 		    ether_sprintf(sc->sc_nvm.hw_addr));
9959 	} else {
9960 		printf("%s: hw rev 0x%x, fw %s, address %s\n",
9961 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9962 		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9963 	}
9964 
9965 	/* not all hardware can do 5GHz band */
9966 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9967 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9968 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9969 
9970 	return 0;
9971 }
9972 
/*
 * Deferred attach continuation, run from a config interrupt hook once
 * interrupts are available.  Performs the initial firmware load via
 * iwx_preinit() and, on success, completes registration with net80211.
 */
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	/* Build the channel list from data obtained during preinit. */
	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/* Register with net80211 and install our callbacks. */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/* Save the net80211 defaults before installing our own handlers. */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	/* Always release the intrhook so boot can proceed. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10021 
10022 const struct iwx_device_cfg *
10023 iwx_find_device_cfg(struct iwx_softc *sc)
10024 {
10025 	uint16_t mac_type, rf_type;
10026 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10027 	int i;
10028 	uint16_t sdev_id;
10029 
10030 	sdev_id = pci_get_device(sc->sc_dev);
10031 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10032 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10033 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10034 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10035 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10036 
10037 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10038 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10039 	cores = IWX_SUBDEVICE_CORES(sdev_id);
10040 
10041 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10042 		 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10043 
10044 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10045 		    dev_info->device != sc->sc_pid)
10046 			continue;
10047 
10048 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10049 		    dev_info->subdevice != sdev_id)
10050 			continue;
10051 
10052 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10053 		    dev_info->mac_type != mac_type)
10054 			continue;
10055 
10056 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10057 		    dev_info->mac_step != mac_step)
10058 			continue;
10059 
10060 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10061 		    dev_info->rf_type != rf_type)
10062 			continue;
10063 
10064 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10065 		    dev_info->cdb != cdb)
10066 			continue;
10067 
10068 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10069 		    dev_info->jacket != jacket)
10070 			continue;
10071 
10072 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10073 		    dev_info->rf_id != rf_id)
10074 			continue;
10075 
10076 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10077 		    dev_info->no_160 != no_160)
10078 			continue;
10079 
10080 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10081 		    dev_info->cores != cores)
10082 			continue;
10083 
10084 		return dev_info->cfg;
10085 	}
10086 
10087 	return NULL;
10088 }
10089 
10090 static int
10091 iwx_probe(device_t dev)
10092 {
10093 	int i;
10094 
10095 	for (i = 0; i < nitems(iwx_devices); i++) {
10096 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10097 		    pci_get_device(dev) == iwx_devices[i].device) {
10098 			device_set_desc(dev, iwx_devices[i].name);
10099 
10100 			/*
10101 			 * Due to significant existing deployments using
10102 			 * iwlwifi lower the priority of iwx.
10103 			 *
10104 			 * This inverts the advice in bus.h where drivers
10105 			 * supporting newer hardware should return
10106 			 * BUS_PROBE_DEFAULT and drivers for older devices
10107 			 * return BUS_PROBE_LOW_PRIORITY.
10108 			 *
10109 			 */
10110 			return (BUS_PROBE_LOW_PRIORITY);
10111 		}
10112 	}
10113 
10114 	return (ENXIO);
10115 }
10116 
/*
 * Device attach: map PCI resources, identify the exact chip/firmware
 * combination, allocate DMA rings, and register with net80211 (deferred
 * to iwx_attach_hook via config_intrhook so firmware can be loaded).
 * Returns 0 on success or ENXIO on failure.
 *
 * NOTE(review): several early error returns below (after the taskqueue,
 * BAR mapping, MSI-X allocation, or interrupt setup have succeeded)
 * return ENXIO without releasing those resources — confirm whether a
 * failed attach leaks them, since detach is not called in that case.
 */
static int
iwx_attach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwx_device_cfg *cfg;
	int err;
	int txq_i, i, j;
	size_t ctxt_info_size;
	int rid;
	int count;
	int error;
	sc->sc_dev = dev;
	sc->sc_pid = pci_get_device(dev);
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	/* Software state: lock, send queue, and deferred-work tasks. */
	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
	IWX_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
	/* NOTE(review): "iwm_taskq" name looks inherited from iwm(4). */
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		return (ENXIO);
	}

	/* The PCIe capability offset is needed for link-control tweaks. */
	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (sc->sc_cap_off == 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (ENXIO);
	}

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
#define	PCI_CFG_RETRY_TIMEOUT	0x41
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* This driver requires MSI-X; legacy interrupts are not supported. */
	if (pci_msix_count(dev)) {
		sc->sc_msix = 1;
	} else {
		device_printf(dev, "no MSI-X found\n");
		return (ENXIO);
	}

	/* Map BAR 0 (device registers). */
	pci_enable_busmaster(dev);
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Allocate one MSI-X vector; fall back to a shared legacy IRQ. */
	count = 1;
	rid = 0;
	if (pci_alloc_msix(dev, &count) == 0)
		rid = 1;
	DPRINTF(("%s: count=%d\n", __func__, count));
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
	}

	/* Clear pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);

	/* Read hardware revision and RF identification registers. */
	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
	DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	/*
	 * Select firmware image, device family, and SoC parameters
	 * (LTR delay, crystal latency, etc.) from the PCI device ID.
	 */
	switch (sc->sc_pid) {
	case PCI_PRODUCT_INTEL_WL_22500_1:
		sc->sc_fwname = IWX_CC_A_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_2:
	case PCI_PRODUCT_INTEL_WL_22500_5:
		/* These devices should be QuZ only. */
		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
			device_printf(dev, "unsupported AX201 adapter\n");
			return (ENXIO);
		}
		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_3:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_4:
	case PCI_PRODUCT_INTEL_WL_22500_7:
	case PCI_PRODUCT_INTEL_WL_22500_8:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 1820;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_6:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_9:
	case PCI_PRODUCT_INTEL_WL_22500_10:
	case PCI_PRODUCT_INTEL_WL_22500_11:
	case PCI_PRODUCT_INTEL_WL_22500_13:
	/* _14 is an MA device, not yet supported */
	case PCI_PRODUCT_INTEL_WL_22500_15:
	case PCI_PRODUCT_INTEL_WL_22500_16:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_12:
	case PCI_PRODUCT_INTEL_WL_22500_17:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		sc->sc_imr_enabled = 1;
		break;
	default:
		device_printf(dev, "unknown adapter type\n");
		return (ENXIO);
	}

	/* Per-subsystem overrides may refine the defaults chosen above. */
	cfg = iwx_find_device_cfg(sc);
	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
	if (cfg) {
		sc->sc_fwname = cfg->fw_name;
		sc->sc_pnvm_name = cfg->pnvm_name;
		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
		sc->sc_uhb_supported = cfg->uhb_supported;
		if (cfg->xtal_latency) {
			sc->sc_xtal_latency = cfg->xtal_latency;
			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
		}
	}

	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		sc->sc_umac_prph_offset = 0x300000;
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
	} else
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;

	/* Allocate DMA memory for loading firmware. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
	else
		ctxt_info_size = sizeof(struct iwx_context_info);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    ctxt_info_size, 1);
	if (err) {
		device_printf(dev,
		    "could not allocate memory for loading firmware\n");
		return (ENXIO);
	}

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
		    sizeof(struct iwx_prph_scratch), 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph scratch memory\n");
			goto fail1;
		}

		/*
		 * Allocate prph information. The driver doesn't use this.
		 * We use the second half of this page to give the device
		 * some dummy TR/CR tail pointers - which shouldn't be
		 * necessary as we don't use this, but the hardware still
		 * reads/writes there and we can't let it go do that with
		 * a NULL pointer.
		 */
		KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
		    ("iwx_prph_info has wrong size"));
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
		    PAGE_SIZE, 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph info memory\n");
			goto fail1;
		}
	}

	/* Allocate interrupt cause table (ICT).*/
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
	if (err) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail1;
	}

	/* TX rings (one per hardware queue) and a single RX ring. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			device_printf(dev, "could not allocate TX ring %d\n",
			    txq_i);
			goto fail4;
		}
	}

	err = iwx_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		device_printf(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

#ifdef IWX_DEBUG
	/* Debug-only sysctl knobs: debug mask, watermarks, queue depths. */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
	    CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
	    CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
	    CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
	    CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
	    CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
	    CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
	    CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
	    CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
	    CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
	    CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
	    CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
#endif
	/* net80211 state and capability advertisement. */
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_MONITOR |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;

	ic->ic_txstream = 2;
	ic->ic_rxstream = 2;
	ic->ic_htcaps |= IEEE80211_HTC_HT
			| IEEE80211_HTCAP_SMPS_OFF
			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
//			| IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
			| IEEE80211_HTCAP_MAXAMSDU_3839;	/* max A-MSDU length */

	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;

	/*
	 * XXX: setupcurchan() expects vhtcaps to be non-zero
	 * https://bugs.freebsd.org/274156
	 */
	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
			| IEEE80211_VHTCAP_SHORT_GI_80
			| 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
			| IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
			| IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;

	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
	/* Advertise MCS 0-9 on two spatial streams only. */
	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
		  IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);

	/* Per-BAID reorder buffers for RX block-ack sessions. */
	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		for (j = 0; j < nitems(rxba->entries); j++)
			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
	}

	/* Defer firmware load + ieee80211_ifattach until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev,
		    "config_intrhook_establish failed\n");
		goto fail4;
	}

	return (0);

fail4:
	while (--txq_i >= 0)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);
	if (sc->ict_dma.vaddr != NULL)
		iwx_dma_contig_free(&sc->ict_dma);

fail1:
	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_dma_contig_free(&sc->prph_scratch_dma);
	iwx_dma_contig_free(&sc->prph_info_dma);
	return (ENXIO);
}
10535 
/*
 * Device detach: stop the hardware, tear down deferred work, detach
 * from net80211, and release rings, firmware images, interrupt and
 * memory resources in the reverse order of attach.  Always returns 0.
 */
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	iwx_stop_device(sc);

	/* Drain outstanding tasks before the taskqueue disappears. */
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Release firmware and PNVM images obtained via firmware(9). */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
        }
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10579 
10580 static void
10581 iwx_radiotap_attach(struct iwx_softc *sc)
10582 {
10583 	struct ieee80211com *ic = &sc->sc_ic;
10584 
10585 	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10586 	    "->%s begin\n", __func__);
10587 
10588 	ieee80211_radiotap_attach(ic,
10589 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10590 		IWX_TX_RADIOTAP_PRESENT,
10591 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10592 		IWX_RX_RADIOTAP_PRESENT);
10593 
10594 	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10595 	    "->%s end\n", __func__);
10596 }
10597 
10598 struct ieee80211vap *
10599 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10600     enum ieee80211_opmode opmode, int flags,
10601     const uint8_t bssid[IEEE80211_ADDR_LEN],
10602     const uint8_t mac[IEEE80211_ADDR_LEN])
10603 {
10604 	struct iwx_vap *ivp;
10605 	struct ieee80211vap *vap;
10606 
10607 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
10608 		return NULL;
10609 	ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10610 	vap = &ivp->iv_vap;
10611 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10612 	vap->iv_bmissthreshold = 10;            /* override default */
10613 	/* Override with driver methods. */
10614 	ivp->iv_newstate = vap->iv_newstate;
10615 	vap->iv_newstate = iwx_newstate;
10616 
10617 	ivp->id = IWX_DEFAULT_MACID;
10618 	ivp->color = IWX_DEFAULT_COLOR;
10619 
10620 	ivp->have_wme = TRUE;
10621 	ivp->ps_disabled = FALSE;
10622 
10623 	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10624 	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10625 
10626 	/* h/w crypto support */
10627 	vap->iv_key_alloc = iwx_key_alloc;
10628 	vap->iv_key_delete = iwx_key_delete;
10629 	vap->iv_key_set = iwx_key_set;
10630 	vap->iv_key_update_begin = iwx_key_update_begin;
10631 	vap->iv_key_update_end = iwx_key_update_end;
10632 
10633 	ieee80211_ratectl_init(vap);
10634 	/* Complete setup. */
10635 	ieee80211_vap_attach(vap, ieee80211_media_change,
10636 	    ieee80211_media_status, mac);
10637 	ic->ic_opmode = opmode;
10638 
10639 	return vap;
10640 }
10641 
10642 static void
10643 iwx_vap_delete(struct ieee80211vap *vap)
10644 {
10645 	struct iwx_vap *ivp = IWX_VAP(vap);
10646 
10647 	ieee80211_ratectl_deinit(vap);
10648 	ieee80211_vap_detach(vap);
10649 	free(ivp, M_80211_VAP);
10650 }
10651 
10652 static void
10653 iwx_parent(struct ieee80211com *ic)
10654 {
10655 	struct iwx_softc *sc = ic->ic_softc;
10656 	IWX_LOCK(sc);
10657 
10658 	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10659 		iwx_stop(sc);
10660 		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10661 	} else {
10662 		iwx_init(sc);
10663 		ieee80211_start_all(ic);
10664 	}
10665 	IWX_UNLOCK(sc);
10666 }
10667 
10668 static int
10669 iwx_suspend(device_t dev)
10670 {
10671 	struct iwx_softc *sc = device_get_softc(dev);
10672 
10673 	if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10674 		iwx_stop(sc);
10675 		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10676 	}
10677 	return (0);
10678 }
10679 
10680 static int
10681 iwx_resume(device_t dev)
10682 {
10683 	struct iwx_softc *sc = device_get_softc(dev);
10684 	int err;
10685 
10686 	err = iwx_start_hw(sc);
10687 	if (err) {
10688 		return err;
10689 	}
10690 
10691 	err = iwx_init_hw(sc);
10692 	if (err) {
10693 		iwx_stop_device(sc);
10694 		return err;
10695 	}
10696 
10697 	ieee80211_start_all(&sc->sc_ic);
10698 
10699 	return (0);
10700 }
10701 
10702 static void
10703 iwx_scan_start(struct ieee80211com *ic)
10704 {
10705 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10706 	struct iwx_softc *sc = ic->ic_softc;
10707 	int err;
10708 
10709 	IWX_LOCK(sc);
10710 	if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10711 		err = iwx_scan(sc);
10712 	else
10713 		err = iwx_bgscan(ic);
10714 	IWX_UNLOCK(sc);
10715 	if (err)
10716 		ieee80211_cancel_scan(vap);
10717 
10718 	return;
10719 }
10720 
/* Multicast filter update: intentionally a no-op for this device. */
static void
iwx_update_mcast(struct ieee80211com *ic)
{
}
10725 
/* Per-channel scan dwell: no-op; scanning is offloaded to firmware. */
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
10730 
/* Minimum-dwell notification: no-op; scanning is offloaded to firmware. */
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
10735 
/* net80211 scan-end hook: tell the firmware to finish the scan. */
static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}
10741 
/*
 * Channel change hook: currently a no-op.  The disabled code below
 * sketches the intended implementation via iwx_phy_ctxt_task().
 */
static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
        struct iwx_softc *sc = ic->ic_softc;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

        IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
        iwx_phy_ctxt_task((void *)sc);
#endif
}
10753 
10754 static void
10755 iwx_endscan_cb(void *arg, int pending)
10756 {
10757 	struct iwx_softc *sc = arg;
10758 	struct ieee80211com *ic = &sc->sc_ic;
10759 
10760 	DPRINTF(("scan ended\n"));
10761 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10762 }
10763 
/* WME parameter update: accepted but not pushed to firmware yet. */
static int
iwx_wme_update(struct ieee80211com *ic)
{
	return 0;
}
10769 
10770 static int
10771 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10772     const struct ieee80211_bpf_params *params)
10773 {
10774 	struct ieee80211com *ic = ni->ni_ic;
10775 	struct iwx_softc *sc = ic->ic_softc;
10776 	int err;
10777 
10778 	IWX_LOCK(sc);
10779 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10780 		err = iwx_tx(sc, m, ni);
10781 		IWX_UNLOCK(sc);
10782 		return err;
10783 	} else {
10784 		IWX_UNLOCK(sc);
10785 		return EIO;
10786 	}
10787 }
10788 
10789 static int
10790 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10791 {
10792 	struct iwx_softc *sc = ic->ic_softc;
10793 	int error;
10794 
10795 	// TODO: mbufq_enqueue in iwm
10796 	// TODO dequeue in iwm_start, counters, locking
10797 	IWX_LOCK(sc);
10798 	error = mbufq_enqueue(&sc->sc_snd, m);
10799 	if (error) {
10800 		IWX_UNLOCK(sc);
10801 		return (error);
10802 	}
10803 
10804 	iwx_start(sc);
10805 	IWX_UNLOCK(sc);
10806 	return (0);
10807 }
10808 
10809 static int
10810 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10811     int baparamset, int batimeout, int baseqctl)
10812 {
10813 	struct ieee80211com *ic = ni->ni_ic;
10814 	struct iwx_softc *sc = ic->ic_softc;
10815 	int tid;
10816 
10817 	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10818 	sc->ni_rx_ba[tid].ba_winstart =
10819 	    _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10820 	sc->ni_rx_ba[tid].ba_winsize =
10821 	    _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10822 	sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10823 
10824 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10825 	    tid >= IWX_MAX_TID_COUNT)
10826 		return ENOSPC;
10827 
10828 	if (sc->ba_rx.start_tidmask & (1 << tid)) {
10829 		DPRINTF(("%s: tid %d already added\n", __func__, tid));
10830 		return EBUSY;
10831 	}
10832 	DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10833 
10834 	sc->ba_rx.start_tidmask |= (1 << tid);
10835 	DPRINTF(("%s: tid=%i\n", __func__, tid));
10836 	DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10837 	DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10838 	DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10839 
10840 	taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10841 
10842 	// TODO:misha move to ba_task (serialize)
10843 	sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10844 
10845 	return (0);
10846 }
10847 
/* RX A-MPDU session stop: no-op; firmware-side teardown not implemented. */
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	return;
}
10853 
10854 static int
10855 iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
10856     int dialogtoken, int baparamset, int batimeout)
10857 {
10858 	struct iwx_softc *sc = ni->ni_ic->ic_softc;
10859 	int tid;
10860 
10861 	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10862 	DPRINTF(("%s: tid=%i\n", __func__, tid));
10863 	sc->ba_tx.start_tidmask |= (1 << tid);
10864 	taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
10865 	return 0;
10866 }
10867 
10868 
/* ADDBA response handler: nothing to do; success is reported to net80211. */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return 0;
}
10875 
/* Key-update transaction begin: no serialization needed, so a no-op. */
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	return;
}
10881 
/* Key-update transaction end: counterpart of iwx_key_update_begin; no-op. */
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	return;
}
10887 
10888 static int
10889 iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
10890 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
10891 {
10892 
10893 	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
10894 		return 1;
10895 	}
10896 	if (!(&vap->iv_nw_keys[0] <= k &&
10897 	     k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
10898 		/*
10899 		 * Not in the global key table, the driver should handle this
10900 		 * by allocating a slot in the h/w key table/cache.  In
10901 		 * lieu of that return key slot 0 for any unicast key
10902 		 * request.  We disallow the request if this is a group key.
10903 		 * This default policy does the right thing for legacy hardware
10904 		 * with a 4 key table.  It also handles devices that pass
10905 		 * packets through untouched when marked with the WEP bit
10906 		 * and key index 0.
10907 		 */
10908 		if (k->wk_flags & IEEE80211_KEY_GROUP)
10909 			return 0;
10910 		*keyix = 0;	/* NB: use key index 0 for ucast key */
10911 	} else {
10912 		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
10913 	}
10914 	*rxkeyix = IEEE80211_KEYIX_NONE;	/* XXX maybe *keyix? */
10915 	return 1;
10916 }
10917 
/*
 * Install a CCMP pairwise or group key into the firmware key table via
 * the ADD_STA_KEY command.  Returns 1 on success (net80211 convention)
 * or a positive errno-style value when the firmware rejects the key.
 * Non-CCMP ciphers are handled in software and accepted immediately.
 */
static int
iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	int err;
	int id;

	if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
		return 1;
	}

	IWX_LOCK(sc);
	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */

	memset(&cmd, 0, sizeof(cmd));

	if (k->wk_flags & IEEE80211_KEY_GROUP) {
		DPRINTF(("%s: adding group key\n", __func__));
	} else {
		DPRINTF(("%s: adding key\n", __func__));
	}
	/* Key id: index into the vap's global key table, 0 for ucast keys. */
	if (k >= &vap->iv_nw_keys[0] &&
	    k <  &vap->iv_nw_keys[IEEE80211_WEP_NKID])
		id = (k - vap->iv_nw_keys);
	else
		id = (0);
	DPRINTF(("%s: setting keyid=%i\n", __func__, id));
	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys go in firmware key offset 1, pairwise in offset 0. */
	if (k->wk_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else {
		cmd.common.key_offset = 0;
	}
	memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
	    k->wk_keylen));
	DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
	for (int i=0; i<k->wk_keylen; i++) {
		DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
	}
	cmd.common.sta_id = IWX_STATION_ID;

	/* Seed the PN/TSC so replay counters continue across rekeys. */
	cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
	DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
		IWX_UNLOCK(sc);
		return err;
	} else
		DPRINTF(("%s: key added successfully\n", __func__));
	IWX_UNLOCK(sc);
	return 1;
}
10987 
/*
 * Key delete: report success without touching the firmware key table.
 * NOTE(review): stale firmware entries are presumably cleared on the
 * next key_set or on station removal — confirm.
 */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return 1;
}
10993 
/* newbus glue: device method table, driver descriptor, module metadata. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwx_probe),
	DEVMETHOD(device_attach,	iwx_attach),
	DEVMETHOD(device_detach,	iwx_detach),
	DEVMETHOD(device_suspend,	iwx_suspend),
	DEVMETHOD(device_resume,	iwx_resume),

	DEVMETHOD_END
};

static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};

DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
/* PNP table lets devmatch(8) autoload the module for matching devices. */
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11017