xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 3bf2d5dd64862cae43545bfcc11323844091bf66)
1 /*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110 
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127 
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131 
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134 
135 #include <net/bpf.h>
136 
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143 
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148 
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153 
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sf.h>
168 #include <dev/iwm/if_iwm_sta.h>
169 
170 #include <dev/iwm/if_iwm_pcie_trans.h>
171 #include <dev/iwm/if_iwm_led.h>
172 #include <dev/iwm/if_iwm_fw.h>
173 
174 /* From DragonflyBSD */
175 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176 
177 const uint8_t iwm_nvm_channels[] = {
178 	/* 2.4 GHz */
179 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
180 	/* 5 GHz */
181 	36, 40, 44, 48, 52, 56, 60, 64,
182 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
183 	149, 153, 157, 161, 165
184 };
185 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
186     "IWM_NUM_CHANNELS is too small");
187 
188 const uint8_t iwm_nvm_channels_8000[] = {
189 	/* 2.4 GHz */
190 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
191 	/* 5 GHz */
192 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
193 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
194 	149, 153, 157, 161, 165, 169, 173, 177, 181
195 };
196 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
197     "IWM_NUM_CHANNELS_8000 is too small");
198 
199 #define IWM_NUM_2GHZ_CHANNELS	14
200 #define IWM_N_HW_ADDR_MASK	0xF
201 
202 /*
203  * XXX For now, there's simply a fixed set of rate table entries
204  * that are populated.
205  */
206 const struct iwm_rate {
207 	uint8_t rate;
208 	uint8_t plcp;
209 } iwm_rates[] = {
210 	{   2,	IWM_RATE_1M_PLCP  },
211 	{   4,	IWM_RATE_2M_PLCP  },
212 	{  11,	IWM_RATE_5M_PLCP  },
213 	{  22,	IWM_RATE_11M_PLCP },
214 	{  12,	IWM_RATE_6M_PLCP  },
215 	{  18,	IWM_RATE_9M_PLCP  },
216 	{  24,	IWM_RATE_12M_PLCP },
217 	{  36,	IWM_RATE_18M_PLCP },
218 	{  48,	IWM_RATE_24M_PLCP },
219 	{  72,	IWM_RATE_36M_PLCP },
220 	{  96,	IWM_RATE_48M_PLCP },
221 	{ 108,	IWM_RATE_54M_PLCP },
222 };
223 #define IWM_RIDX_CCK	0
224 #define IWM_RIDX_OFDM	4
225 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
226 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
227 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228 
229 struct iwm_nvm_section {
230 	uint16_t length;
231 	uint8_t *data;
232 };
233 
234 #define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
235 #define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
236 
237 struct iwm_mvm_alive_data {
238 	int valid;
239 	uint32_t scd_base_addr;
240 };
241 
242 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243 static int	iwm_firmware_store_section(struct iwm_softc *,
244                                            enum iwm_ucode_type,
245                                            const uint8_t *, size_t);
246 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
247 static void	iwm_fw_info_free(struct iwm_fw_info *);
248 static int	iwm_read_firmware(struct iwm_softc *);
249 static int	iwm_alloc_fwmem(struct iwm_softc *);
250 static int	iwm_alloc_sched(struct iwm_softc *);
251 static int	iwm_alloc_kw(struct iwm_softc *);
252 static int	iwm_alloc_ict(struct iwm_softc *);
253 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void	iwm_enable_interrupts(struct iwm_softc *);
261 static void	iwm_restore_interrupts(struct iwm_softc *);
262 static void	iwm_disable_interrupts(struct iwm_softc *);
263 static void	iwm_ict_reset(struct iwm_softc *);
264 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void	iwm_stop_device(struct iwm_softc *);
266 static void	iwm_mvm_nic_config(struct iwm_softc *);
267 static int	iwm_nic_rx_init(struct iwm_softc *);
268 static int	iwm_nic_tx_init(struct iwm_softc *);
269 static int	iwm_nic_init(struct iwm_softc *);
270 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274 				     uint16_t *, uint32_t);
275 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
276 static void	iwm_add_channel_band(struct iwm_softc *,
277 		    struct ieee80211_channel[], int, int *, int, size_t,
278 		    const uint8_t[]);
279 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
280 		    struct ieee80211_channel[]);
281 static struct iwm_nvm_data *
282 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283 			   const uint16_t *, const uint16_t *,
284 			   const uint16_t *, const uint16_t *,
285 			   const uint16_t *);
286 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
287 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
288 					       struct iwm_nvm_data *,
289 					       const uint16_t *,
290 					       const uint16_t *);
291 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292 			    const uint16_t *);
293 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295 				  const uint16_t *);
296 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
297 				   const uint16_t *);
298 static void	iwm_set_radio_cfg(const struct iwm_softc *,
299 				  struct iwm_nvm_data *, uint32_t);
300 static struct iwm_nvm_data *
301 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302 static int	iwm_nvm_init(struct iwm_softc *);
303 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304 				      const struct iwm_fw_desc *);
305 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306 					     bus_addr_t, uint32_t);
307 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308 						const struct iwm_fw_img *,
309 						int, int *);
310 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
311 					   const struct iwm_fw_img *,
312 					   int, int *);
313 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314 					       const struct iwm_fw_img *);
315 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
316 					  const struct iwm_fw_img *);
317 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
318 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
320 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
321                                               enum iwm_ucode_type);
322 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
323 static int	iwm_mvm_config_ltr(struct iwm_softc *sc);
324 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
325 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
326 					    struct iwm_rx_phy_info *);
327 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
328                                       struct iwm_rx_packet *);
329 static int	iwm_get_noise(struct iwm_softc *,
330 		    const struct iwm_mvm_statistics_rx_non_phy *);
331 static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
332 		    struct iwm_rx_packet *);
333 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
334 				    uint32_t, boolean_t);
335 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
336                                          struct iwm_rx_packet *,
337 				         struct iwm_node *);
338 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
339 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
340 #if 0
341 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
342                                  uint16_t);
343 #endif
344 static const struct iwm_rate *
345 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
346 			struct mbuf *, struct iwm_tx_cmd *);
347 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
348                        struct ieee80211_node *, int);
349 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
350 			     const struct ieee80211_bpf_params *);
351 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
352 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
353 static struct ieee80211_node *
354 		iwm_node_alloc(struct ieee80211vap *,
355 		               const uint8_t[IEEE80211_ADDR_LEN]);
356 static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
357 static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
358 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
359 static int	iwm_media_change(struct ifnet *);
360 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
361 static void	iwm_endscan_cb(void *, int);
362 static int	iwm_send_bt_init_conf(struct iwm_softc *);
363 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
364 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
365 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
366 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
367 static int	iwm_init_hw(struct iwm_softc *);
368 static void	iwm_init(struct iwm_softc *);
369 static void	iwm_start(struct iwm_softc *);
370 static void	iwm_stop(struct iwm_softc *);
371 static void	iwm_watchdog(void *);
372 static void	iwm_parent(struct ieee80211com *);
373 #ifdef IWM_DEBUG
374 static const char *
375 		iwm_desc_lookup(uint32_t);
376 static void	iwm_nic_error(struct iwm_softc *);
377 static void	iwm_nic_umac_error(struct iwm_softc *);
378 #endif
379 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
380 static void	iwm_notif_intr(struct iwm_softc *);
381 static void	iwm_intr(void *);
382 static int	iwm_attach(device_t);
383 static int	iwm_is_valid_ether_addr(uint8_t *);
384 static void	iwm_preinit(void *);
385 static int	iwm_detach_local(struct iwm_softc *sc, int);
386 static void	iwm_init_task(void *);
387 static void	iwm_radiotap_attach(struct iwm_softc *);
388 static struct ieee80211vap *
389 		iwm_vap_create(struct ieee80211com *,
390 		               const char [IFNAMSIZ], int,
391 		               enum ieee80211_opmode, int,
392 		               const uint8_t [IEEE80211_ADDR_LEN],
393 		               const uint8_t [IEEE80211_ADDR_LEN]);
394 static void	iwm_vap_delete(struct ieee80211vap *);
395 static void	iwm_xmit_queue_drain(struct iwm_softc *);
396 static void	iwm_scan_start(struct ieee80211com *);
397 static void	iwm_scan_end(struct ieee80211com *);
398 static void	iwm_update_mcast(struct ieee80211com *);
399 static void	iwm_set_channel(struct ieee80211com *);
400 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
401 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
402 static int	iwm_detach(device_t);
403 
404 static int	iwm_lar_disable = 0;
405 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
406 
407 /*
408  * Firmware parser.
409  */
410 
411 static int
412 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
413 {
414 	const struct iwm_fw_cscheme_list *l = (const void *)data;
415 
416 	if (dlen < sizeof(*l) ||
417 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
418 		return EINVAL;
419 
420 	/* we don't actually store anything for now, always use s/w crypto */
421 
422 	return 0;
423 }
424 
425 static int
426 iwm_firmware_store_section(struct iwm_softc *sc,
427     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
428 {
429 	struct iwm_fw_img *fws;
430 	struct iwm_fw_desc *fwone;
431 
432 	if (type >= IWM_UCODE_TYPE_MAX)
433 		return EINVAL;
434 	if (dlen < sizeof(uint32_t))
435 		return EINVAL;
436 
437 	fws = &sc->sc_fw.img[type];
438 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
439 		return EINVAL;
440 
441 	fwone = &fws->sec[fws->fw_count];
442 
443 	/* first 32bit are device load offset */
444 	memcpy(&fwone->offset, data, sizeof(uint32_t));
445 
446 	/* rest is data */
447 	fwone->data = data + sizeof(uint32_t);
448 	fwone->len = dlen - sizeof(uint32_t);
449 
450 	fws->fw_count++;
451 
452 	return 0;
453 }
454 
455 #define IWM_DEFAULT_SCAN_CHANNELS 40
456 
457 /* iwlwifi: iwl-drv.c */
458 struct iwm_tlv_calib_data {
459 	uint32_t ucode_type;
460 	struct iwm_tlv_calib_ctrl calib;
461 } __packed;
462 
463 static int
464 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
465 {
466 	const struct iwm_tlv_calib_data *def_calib = data;
467 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
468 
469 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
470 		device_printf(sc->sc_dev,
471 		    "Wrong ucode_type %u for default "
472 		    "calibration.\n", ucode_type);
473 		return EINVAL;
474 	}
475 
476 	sc->sc_default_calib[ucode_type].flow_trigger =
477 	    def_calib->calib.flow_trigger;
478 	sc->sc_default_calib[ucode_type].event_trigger =
479 	    def_calib->calib.event_trigger;
480 
481 	return 0;
482 }
483 
484 static int
485 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
486 			struct iwm_ucode_capabilities *capa)
487 {
488 	const struct iwm_ucode_api *ucode_api = (const void *)data;
489 	uint32_t api_index = le32toh(ucode_api->api_index);
490 	uint32_t api_flags = le32toh(ucode_api->api_flags);
491 	int i;
492 
493 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
494 		device_printf(sc->sc_dev,
495 		    "api flags index %d larger than supported by driver\n",
496 		    api_index);
497 		/* don't return an error so we can load FW that has more bits */
498 		return 0;
499 	}
500 
501 	for (i = 0; i < 32; i++) {
502 		if (api_flags & (1U << i))
503 			setbit(capa->enabled_api, i + 32 * api_index);
504 	}
505 
506 	return 0;
507 }
508 
509 static int
510 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
511 			   struct iwm_ucode_capabilities *capa)
512 {
513 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
514 	uint32_t api_index = le32toh(ucode_capa->api_index);
515 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
516 	int i;
517 
518 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
519 		device_printf(sc->sc_dev,
520 		    "capa flags index %d larger than supported by driver\n",
521 		    api_index);
522 		/* don't return an error so we can load FW that has more bits */
523 		return 0;
524 	}
525 
526 	for (i = 0; i < 32; i++) {
527 		if (api_flags & (1U << i))
528 			setbit(capa->enabled_capa, i + 32 * api_index);
529 	}
530 
531 	return 0;
532 }
533 
534 static void
535 iwm_fw_info_free(struct iwm_fw_info *fw)
536 {
537 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
538 	fw->fw_fp = NULL;
539 	memset(fw->img, 0, sizeof(fw->img));
540 }
541 
542 static int
543 iwm_read_firmware(struct iwm_softc *sc)
544 {
545 	struct iwm_fw_info *fw = &sc->sc_fw;
546 	const struct iwm_tlv_ucode_header *uhdr;
547 	const struct iwm_ucode_tlv *tlv;
548 	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
549 	enum iwm_ucode_tlv_type tlv_type;
550 	const struct firmware *fwp;
551 	const uint8_t *data;
552 	uint32_t tlv_len;
553 	uint32_t usniffer_img;
554 	const uint8_t *tlv_data;
555 	uint32_t paging_mem_size;
556 	int num_of_cpus;
557 	int error = 0;
558 	size_t len;
559 
560 	/*
561 	 * Load firmware into driver memory.
562 	 * fw_fp will be set.
563 	 */
564 	fwp = firmware_get(sc->cfg->fw_name);
565 	if (fwp == NULL) {
566 		device_printf(sc->sc_dev,
567 		    "could not read firmware %s (error %d)\n",
568 		    sc->cfg->fw_name, error);
569 		goto out;
570 	}
571 	fw->fw_fp = fwp;
572 
573 	/* (Re-)Initialize default values. */
574 	capa->flags = 0;
575 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
576 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
577 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
578 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
579 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
580 
581 	/*
582 	 * Parse firmware contents
583 	 */
584 
585 	uhdr = (const void *)fw->fw_fp->data;
586 	if (*(const uint32_t *)fw->fw_fp->data != 0
587 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
588 		device_printf(sc->sc_dev, "invalid firmware %s\n",
589 		    sc->cfg->fw_name);
590 		error = EINVAL;
591 		goto out;
592 	}
593 
594 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
595 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
596 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
597 	    IWM_UCODE_API(le32toh(uhdr->ver)));
598 	data = uhdr->data;
599 	len = fw->fw_fp->datasize - sizeof(*uhdr);
600 
601 	while (len >= sizeof(*tlv)) {
602 		len -= sizeof(*tlv);
603 		tlv = (const void *)data;
604 
605 		tlv_len = le32toh(tlv->length);
606 		tlv_type = le32toh(tlv->type);
607 		tlv_data = tlv->data;
608 
609 		if (len < tlv_len) {
610 			device_printf(sc->sc_dev,
611 			    "firmware too short: %zu bytes\n",
612 			    len);
613 			error = EINVAL;
614 			goto parse_out;
615 		}
616 		len -= roundup2(tlv_len, 4);
617 		data += sizeof(*tlv) + roundup2(tlv_len, 4);
618 
619 		switch ((int)tlv_type) {
620 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
621 			if (tlv_len != sizeof(uint32_t)) {
622 				device_printf(sc->sc_dev,
623 				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
624 				    __func__, tlv_len);
625 				error = EINVAL;
626 				goto parse_out;
627 			}
628 			capa->max_probe_length =
629 			    le32_to_cpup((const uint32_t *)tlv_data);
630 			/* limit it to something sensible */
631 			if (capa->max_probe_length >
632 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
633 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
634 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
635 				    "ridiculous\n", __func__);
636 				error = EINVAL;
637 				goto parse_out;
638 			}
639 			break;
640 		case IWM_UCODE_TLV_PAN:
641 			if (tlv_len) {
642 				device_printf(sc->sc_dev,
643 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
644 				    __func__, tlv_len);
645 				error = EINVAL;
646 				goto parse_out;
647 			}
648 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
649 			break;
650 		case IWM_UCODE_TLV_FLAGS:
651 			if (tlv_len < sizeof(uint32_t)) {
652 				device_printf(sc->sc_dev,
653 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
654 				    __func__, tlv_len);
655 				error = EINVAL;
656 				goto parse_out;
657 			}
658 			if (tlv_len % sizeof(uint32_t)) {
659 				device_printf(sc->sc_dev,
660 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
661 				    __func__, tlv_len);
662 				error = EINVAL;
663 				goto parse_out;
664 			}
665 			/*
666 			 * Apparently there can be many flags, but Linux driver
667 			 * parses only the first one, and so do we.
668 			 *
669 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
670 			 * Intentional or a bug?  Observations from
671 			 * current firmware file:
672 			 *  1) TLV_PAN is parsed first
673 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
674 			 * ==> this resets TLV_PAN to itself... hnnnk
675 			 */
676 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
677 			break;
678 		case IWM_UCODE_TLV_CSCHEME:
679 			if ((error = iwm_store_cscheme(sc,
680 			    tlv_data, tlv_len)) != 0) {
681 				device_printf(sc->sc_dev,
682 				    "%s: iwm_store_cscheme(): returned %d\n",
683 				    __func__, error);
684 				goto parse_out;
685 			}
686 			break;
687 		case IWM_UCODE_TLV_NUM_OF_CPU:
688 			if (tlv_len != sizeof(uint32_t)) {
689 				device_printf(sc->sc_dev,
690 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
691 				    __func__, tlv_len);
692 				error = EINVAL;
693 				goto parse_out;
694 			}
695 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
696 			if (num_of_cpus == 2) {
697 				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
698 					TRUE;
699 				fw->img[IWM_UCODE_INIT].is_dual_cpus =
700 					TRUE;
701 				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
702 					TRUE;
703 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
704 				device_printf(sc->sc_dev,
705 				    "%s: Driver supports only 1 or 2 CPUs\n",
706 				    __func__);
707 				error = EINVAL;
708 				goto parse_out;
709 			}
710 			break;
711 		case IWM_UCODE_TLV_SEC_RT:
712 			if ((error = iwm_firmware_store_section(sc,
713 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
714 				device_printf(sc->sc_dev,
715 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
716 				    __func__, error);
717 				goto parse_out;
718 			}
719 			break;
720 		case IWM_UCODE_TLV_SEC_INIT:
721 			if ((error = iwm_firmware_store_section(sc,
722 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
723 				device_printf(sc->sc_dev,
724 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
725 				    __func__, error);
726 				goto parse_out;
727 			}
728 			break;
729 		case IWM_UCODE_TLV_SEC_WOWLAN:
730 			if ((error = iwm_firmware_store_section(sc,
731 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
732 				device_printf(sc->sc_dev,
733 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
734 				    __func__, error);
735 				goto parse_out;
736 			}
737 			break;
738 		case IWM_UCODE_TLV_DEF_CALIB:
739 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
740 				device_printf(sc->sc_dev,
741 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
742 				    __func__, tlv_len,
743 				    sizeof(struct iwm_tlv_calib_data));
744 				error = EINVAL;
745 				goto parse_out;
746 			}
747 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
748 				device_printf(sc->sc_dev,
749 				    "%s: iwm_set_default_calib() failed: %d\n",
750 				    __func__, error);
751 				goto parse_out;
752 			}
753 			break;
754 		case IWM_UCODE_TLV_PHY_SKU:
755 			if (tlv_len != sizeof(uint32_t)) {
756 				error = EINVAL;
757 				device_printf(sc->sc_dev,
758 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
759 				    __func__, tlv_len);
760 				goto parse_out;
761 			}
762 			sc->sc_fw.phy_config =
763 			    le32_to_cpup((const uint32_t *)tlv_data);
764 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
765 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
766 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
767 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
768 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
769 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
770 			break;
771 
772 		case IWM_UCODE_TLV_API_CHANGES_SET: {
773 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
774 				error = EINVAL;
775 				goto parse_out;
776 			}
777 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
778 				error = EINVAL;
779 				goto parse_out;
780 			}
781 			break;
782 		}
783 
784 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
785 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
786 				error = EINVAL;
787 				goto parse_out;
788 			}
789 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
790 				error = EINVAL;
791 				goto parse_out;
792 			}
793 			break;
794 		}
795 
796 		case IWM_UCODE_TLV_CMD_VERSIONS:
797 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
798 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
799 			/* ignore, not used by current driver */
800 			break;
801 
802 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
803 			if ((error = iwm_firmware_store_section(sc,
804 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
805 			    tlv_len)) != 0)
806 				goto parse_out;
807 			break;
808 
809 		case IWM_UCODE_TLV_PAGING:
810 			if (tlv_len != sizeof(uint32_t)) {
811 				error = EINVAL;
812 				goto parse_out;
813 			}
814 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
815 
816 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
817 			    "%s: Paging: paging enabled (size = %u bytes)\n",
818 			    __func__, paging_mem_size);
819 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
820 				device_printf(sc->sc_dev,
821 					"%s: Paging: driver supports up to %u bytes for paging image\n",
822 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
823 				error = EINVAL;
824 				goto out;
825 			}
826 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
827 				device_printf(sc->sc_dev,
828 				    "%s: Paging: image isn't multiple %u\n",
829 				    __func__, IWM_FW_PAGING_SIZE);
830 				error = EINVAL;
831 				goto out;
832 			}
833 
834 			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
835 			    paging_mem_size;
836 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
837 			sc->sc_fw.img[usniffer_img].paging_mem_size =
838 			    paging_mem_size;
839 			break;
840 
841 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
842 			if (tlv_len != sizeof(uint32_t)) {
843 				error = EINVAL;
844 				goto parse_out;
845 			}
846 			capa->n_scan_channels =
847 			    le32_to_cpup((const uint32_t *)tlv_data);
848 			break;
849 
850 		case IWM_UCODE_TLV_FW_VERSION:
851 			if (tlv_len != sizeof(uint32_t) * 3) {
852 				error = EINVAL;
853 				goto parse_out;
854 			}
855 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
856 			    "%d.%d.%d",
857 			    le32toh(((const uint32_t *)tlv_data)[0]),
858 			    le32toh(((const uint32_t *)tlv_data)[1]),
859 			    le32toh(((const uint32_t *)tlv_data)[2]));
860 			break;
861 
862 		case IWM_UCODE_TLV_FW_MEM_SEG:
863 			break;
864 
865 		default:
866 			device_printf(sc->sc_dev,
867 			    "%s: unknown firmware section %d, abort\n",
868 			    __func__, tlv_type);
869 			error = EINVAL;
870 			goto parse_out;
871 		}
872 	}
873 
874 	KASSERT(error == 0, ("unhandled error"));
875 
876  parse_out:
877 	if (error) {
878 		device_printf(sc->sc_dev, "firmware parse error %d, "
879 		    "section type %d\n", error, tlv_type);
880 	}
881 
882  out:
883 	if (error) {
884 		if (fw->fw_fp != NULL)
885 			iwm_fw_info_free(fw);
886 	}
887 
888 	return error;
889 }
890 
891 /*
892  * DMA resource routines
893  */
894 
895 /* fwmem is used to load firmware onto the card */
896 static int
897 iwm_alloc_fwmem(struct iwm_softc *sc)
898 {
899 	/* Must be aligned on a 16-byte boundary. */
900 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
902 }
903 
904 /* tx scheduler rings.  not used? */
905 static int
906 iwm_alloc_sched(struct iwm_softc *sc)
907 {
908 	/* TX scheduler rings must be aligned on a 1KB boundary. */
909 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
910 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
911 }
912 
913 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
914 static int
915 iwm_alloc_kw(struct iwm_softc *sc)
916 {
917 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
918 }
919 
920 /* interrupt cause table */
921 static int
922 iwm_alloc_ict(struct iwm_softc *sc)
923 {
924 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
925 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
926 }
927 
928 static int
929 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
930 {
931 	bus_size_t size;
932 	int i, error;
933 
934 	ring->cur = 0;
935 
936 	/* Allocate RX descriptors (256-byte aligned). */
937 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
938 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
939 	if (error != 0) {
940 		device_printf(sc->sc_dev,
941 		    "could not allocate RX ring DMA memory\n");
942 		goto fail;
943 	}
944 	ring->desc = ring->desc_dma.vaddr;
945 
946 	/* Allocate RX status area (16-byte aligned). */
947 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
948 	    sizeof(*ring->stat), 16);
949 	if (error != 0) {
950 		device_printf(sc->sc_dev,
951 		    "could not allocate RX status DMA memory\n");
952 		goto fail;
953 	}
954 	ring->stat = ring->stat_dma.vaddr;
955 
956         /* Create RX buffer DMA tag. */
957         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
958             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
959             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
960         if (error != 0) {
961                 device_printf(sc->sc_dev,
962                     "%s: could not create RX buf DMA tag, error %d\n",
963                     __func__, error);
964                 goto fail;
965         }
966 
967 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
968 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
969 	if (error != 0) {
970 		device_printf(sc->sc_dev,
971 		    "%s: could not create RX buf DMA map, error %d\n",
972 		    __func__, error);
973 		goto fail;
974 	}
975 	/*
976 	 * Allocate and map RX buffers.
977 	 */
978 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
979 		struct iwm_rx_data *data = &ring->data[i];
980 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
981 		if (error != 0) {
982 			device_printf(sc->sc_dev,
983 			    "%s: could not create RX buf DMA map, error %d\n",
984 			    __func__, error);
985 			goto fail;
986 		}
987 		data->m = NULL;
988 
989 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
990 			goto fail;
991 		}
992 	}
993 	return 0;
994 
995 fail:	iwm_free_rx_ring(sc, ring);
996 	return error;
997 }
998 
999 static void
1000 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1001 {
1002 	/* Reset the ring state */
1003 	ring->cur = 0;
1004 
1005 	/*
1006 	 * The hw rx ring index in shared memory must also be cleared,
1007 	 * otherwise the discrepancy can cause reprocessing chaos.
1008 	 */
1009 	if (sc->rxq.stat)
1010 		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1011 }
1012 
1013 static void
1014 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1015 {
1016 	int i;
1017 
1018 	iwm_dma_contig_free(&ring->desc_dma);
1019 	iwm_dma_contig_free(&ring->stat_dma);
1020 
1021 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1022 		struct iwm_rx_data *data = &ring->data[i];
1023 
1024 		if (data->m != NULL) {
1025 			bus_dmamap_sync(ring->data_dmat, data->map,
1026 			    BUS_DMASYNC_POSTREAD);
1027 			bus_dmamap_unload(ring->data_dmat, data->map);
1028 			m_freem(data->m);
1029 			data->m = NULL;
1030 		}
1031 		if (data->map != NULL) {
1032 			bus_dmamap_destroy(ring->data_dmat, data->map);
1033 			data->map = NULL;
1034 		}
1035 	}
1036 	if (ring->spare_map != NULL) {
1037 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1038 		ring->spare_map = NULL;
1039 	}
1040 	if (ring->data_dmat != NULL) {
1041 		bus_dma_tag_destroy(ring->data_dmat);
1042 		ring->data_dmat = NULL;
1043 	}
1044 }
1045 
1046 static int
1047 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1048 {
1049 	bus_addr_t paddr;
1050 	bus_size_t size;
1051 	size_t maxsize;
1052 	int nsegments;
1053 	int i, error;
1054 
1055 	ring->qid = qid;
1056 	ring->queued = 0;
1057 	ring->cur = 0;
1058 
1059 	/* Allocate TX descriptors (256-byte aligned). */
1060 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1061 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1062 	if (error != 0) {
1063 		device_printf(sc->sc_dev,
1064 		    "could not allocate TX ring DMA memory\n");
1065 		goto fail;
1066 	}
1067 	ring->desc = ring->desc_dma.vaddr;
1068 
1069 	/*
1070 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1071 	 * to allocate commands space for other rings.
1072 	 */
1073 	if (qid > IWM_MVM_CMD_QUEUE)
1074 		return 0;
1075 
1076 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1077 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1078 	if (error != 0) {
1079 		device_printf(sc->sc_dev,
1080 		    "could not allocate TX cmd DMA memory\n");
1081 		goto fail;
1082 	}
1083 	ring->cmd = ring->cmd_dma.vaddr;
1084 
1085 	/* FW commands may require more mapped space than packets. */
1086 	if (qid == IWM_MVM_CMD_QUEUE) {
1087 		maxsize = IWM_RBUF_SIZE;
1088 		nsegments = 1;
1089 	} else {
1090 		maxsize = MCLBYTES;
1091 		nsegments = IWM_MAX_SCATTER - 2;
1092 	}
1093 
1094 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1095 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1096             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1097 	if (error != 0) {
1098 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1099 		goto fail;
1100 	}
1101 
1102 	paddr = ring->cmd_dma.paddr;
1103 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1104 		struct iwm_tx_data *data = &ring->data[i];
1105 
1106 		data->cmd_paddr = paddr;
1107 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1108 		    + offsetof(struct iwm_tx_cmd, scratch);
1109 		paddr += sizeof(struct iwm_device_cmd);
1110 
1111 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1112 		if (error != 0) {
1113 			device_printf(sc->sc_dev,
1114 			    "could not create TX buf DMA map\n");
1115 			goto fail;
1116 		}
1117 	}
1118 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1119 	    ("invalid physical address"));
1120 	return 0;
1121 
1122 fail:	iwm_free_tx_ring(sc, ring);
1123 	return error;
1124 }
1125 
1126 static void
1127 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1128 {
1129 	int i;
1130 
1131 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1132 		struct iwm_tx_data *data = &ring->data[i];
1133 
1134 		if (data->m != NULL) {
1135 			bus_dmamap_sync(ring->data_dmat, data->map,
1136 			    BUS_DMASYNC_POSTWRITE);
1137 			bus_dmamap_unload(ring->data_dmat, data->map);
1138 			m_freem(data->m);
1139 			data->m = NULL;
1140 		}
1141 	}
1142 	/* Clear TX descriptors. */
1143 	memset(ring->desc, 0, ring->desc_dma.size);
1144 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1145 	    BUS_DMASYNC_PREWRITE);
1146 	sc->qfullmsk &= ~(1 << ring->qid);
1147 	ring->queued = 0;
1148 	ring->cur = 0;
1149 
1150 	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1151 		iwm_pcie_clear_cmd_in_flight(sc);
1152 }
1153 
1154 static void
1155 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1156 {
1157 	int i;
1158 
1159 	iwm_dma_contig_free(&ring->desc_dma);
1160 	iwm_dma_contig_free(&ring->cmd_dma);
1161 
1162 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1163 		struct iwm_tx_data *data = &ring->data[i];
1164 
1165 		if (data->m != NULL) {
1166 			bus_dmamap_sync(ring->data_dmat, data->map,
1167 			    BUS_DMASYNC_POSTWRITE);
1168 			bus_dmamap_unload(ring->data_dmat, data->map);
1169 			m_freem(data->m);
1170 			data->m = NULL;
1171 		}
1172 		if (data->map != NULL) {
1173 			bus_dmamap_destroy(ring->data_dmat, data->map);
1174 			data->map = NULL;
1175 		}
1176 	}
1177 	if (ring->data_dmat != NULL) {
1178 		bus_dma_tag_destroy(ring->data_dmat);
1179 		ring->data_dmat = NULL;
1180 	}
1181 }
1182 
1183 /*
1184  * High-level hardware frobbing routines
1185  */
1186 
1187 static void
1188 iwm_enable_interrupts(struct iwm_softc *sc)
1189 {
1190 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1191 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1192 }
1193 
1194 static void
1195 iwm_restore_interrupts(struct iwm_softc *sc)
1196 {
1197 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1198 }
1199 
1200 static void
1201 iwm_disable_interrupts(struct iwm_softc *sc)
1202 {
1203 	/* disable interrupts */
1204 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1205 
1206 	/* acknowledge all interrupts */
1207 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1208 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1209 }
1210 
1211 static void
1212 iwm_ict_reset(struct iwm_softc *sc)
1213 {
1214 	iwm_disable_interrupts(sc);
1215 
1216 	/* Reset ICT table. */
1217 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1218 	sc->ict_cur = 0;
1219 
1220 	/* Set physical address of ICT table (4KB aligned). */
1221 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1222 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1223 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1224 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1225 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1226 
1227 	/* Switch to ICT interrupt mode in driver. */
1228 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1229 
1230 	/* Re-enable interrupts. */
1231 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1232 	iwm_enable_interrupts(sc);
1233 }
1234 
1235 /* iwlwifi pcie/trans.c */
1236 
1237 /*
1238  * Since this .. hard-resets things, it's time to actually
1239  * mark the first vap (if any) as having no mac context.
1240  * It's annoying, but since the driver is potentially being
1241  * stop/start'ed whilst active (thanks openbsd port!) we
1242  * have to correctly track this.
1243  */
1244 static void
1245 iwm_stop_device(struct iwm_softc *sc)
1246 {
1247 	struct ieee80211com *ic = &sc->sc_ic;
1248 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1249 	int chnl, qid;
1250 	uint32_t mask = 0;
1251 
1252 	/* tell the device to stop sending interrupts */
1253 	iwm_disable_interrupts(sc);
1254 
1255 	/*
1256 	 * FreeBSD-local: mark the first vap as not-uploaded,
1257 	 * so the next transition through auth/assoc
1258 	 * will correctly populate the MAC context.
1259 	 */
1260 	if (vap) {
1261 		struct iwm_vap *iv = IWM_VAP(vap);
1262 		iv->phy_ctxt = NULL;
1263 		iv->is_uploaded = 0;
1264 	}
1265 	sc->sc_firmware_state = 0;
1266 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1267 
1268 	/* device going down, Stop using ICT table */
1269 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1270 
1271 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1272 
1273 	if (iwm_nic_lock(sc)) {
1274 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1275 
1276 		/* Stop each Tx DMA channel */
1277 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1278 			IWM_WRITE(sc,
1279 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1280 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1281 		}
1282 
1283 		/* Wait for DMA channels to be idle */
1284 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1285 		    5000)) {
1286 			device_printf(sc->sc_dev,
1287 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1288 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1289 		}
1290 		iwm_nic_unlock(sc);
1291 	}
1292 	iwm_pcie_rx_stop(sc);
1293 
1294 	/* Stop RX ring. */
1295 	iwm_reset_rx_ring(sc, &sc->rxq);
1296 
1297 	/* Reset all TX rings. */
1298 	for (qid = 0; qid < nitems(sc->txq); qid++)
1299 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1300 
1301 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1302 		/* Power-down device's busmaster DMA clocks */
1303 		if (iwm_nic_lock(sc)) {
1304 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1305 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1306 			iwm_nic_unlock(sc);
1307 		}
1308 		DELAY(5);
1309 	}
1310 
1311 	/* Make sure (redundant) we've released our request to stay awake */
1312 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1313 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1314 
1315 	/* Stop the device, and put it in low power state */
1316 	iwm_apm_stop(sc);
1317 
1318 	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1319 	 * Clean again the interrupt here
1320 	 */
1321 	iwm_disable_interrupts(sc);
1322 	/* stop and reset the on-board processor */
1323 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1324 
1325 	/*
1326 	 * Even if we stop the HW, we still want the RF kill
1327 	 * interrupt
1328 	 */
1329 	iwm_enable_rfkill_int(sc);
1330 	iwm_check_rfkill(sc);
1331 }
1332 
1333 /* iwlwifi: mvm/ops.c */
1334 static void
1335 iwm_mvm_nic_config(struct iwm_softc *sc)
1336 {
1337 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1338 	uint32_t reg_val = 0;
1339 	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1340 
1341 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1342 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1343 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1344 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1345 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1346 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1347 
1348 	/* SKU control */
1349 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1350 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1351 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1352 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1353 
1354 	/* radio configuration */
1355 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1356 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1357 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1358 
1359 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1360 
1361 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1362 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1363 	    radio_cfg_step, radio_cfg_dash);
1364 
1365 	/*
1366 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1367 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1368 	 * to lose ownership and not being able to obtain it back.
1369 	 */
1370 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1371 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1372 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1373 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1374 	}
1375 }
1376 
1377 static int
1378 iwm_nic_rx_init(struct iwm_softc *sc)
1379 {
1380 	/*
1381 	 * Initialize RX ring.  This is from the iwn driver.
1382 	 */
1383 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1384 
1385 	/* Stop Rx DMA */
1386 	iwm_pcie_rx_stop(sc);
1387 
1388 	if (!iwm_nic_lock(sc))
1389 		return EBUSY;
1390 
1391 	/* reset and flush pointers */
1392 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1393 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1394 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1395 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1396 
1397 	/* Set physical address of RX ring (256-byte aligned). */
1398 	IWM_WRITE(sc,
1399 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1400 
1401 	/* Set physical address of RX status (16-byte aligned). */
1402 	IWM_WRITE(sc,
1403 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1404 
1405 	/* Enable Rx DMA
1406 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1407 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1408 	 *      the credit mechanism in 5000 HW RX FIFO
1409 	 * Direct rx interrupts to hosts
1410 	 * Rx buffer size 4 or 8k or 12k
1411 	 * RB timeout 0x10
1412 	 * 256 RBDs
1413 	 */
1414 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1415 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1416 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1417 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1418 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1419 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1420 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1421 
1422 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1423 
1424 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1425 	if (sc->cfg->host_interrupt_operation_mode)
1426 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1427 
1428 	/*
1429 	 * Thus sayeth el jefe (iwlwifi) via a comment:
1430 	 *
1431 	 * This value should initially be 0 (before preparing any
1432 	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1433 	 */
1434 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1435 
1436 	iwm_nic_unlock(sc);
1437 
1438 	return 0;
1439 }
1440 
1441 static int
1442 iwm_nic_tx_init(struct iwm_softc *sc)
1443 {
1444 	int qid;
1445 
1446 	if (!iwm_nic_lock(sc))
1447 		return EBUSY;
1448 
1449 	/* Deactivate TX scheduler. */
1450 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1451 
1452 	/* Set physical address of "keep warm" page (16-byte aligned). */
1453 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1454 
1455 	/* Initialize TX rings. */
1456 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1457 		struct iwm_tx_ring *txq = &sc->txq[qid];
1458 
1459 		/* Set physical address of TX ring (256-byte aligned). */
1460 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1461 		    txq->desc_dma.paddr >> 8);
1462 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1463 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1464 		    __func__,
1465 		    qid, txq->desc,
1466 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1467 	}
1468 
1469 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1470 
1471 	iwm_nic_unlock(sc);
1472 
1473 	return 0;
1474 }
1475 
1476 static int
1477 iwm_nic_init(struct iwm_softc *sc)
1478 {
1479 	int error;
1480 
1481 	iwm_apm_init(sc);
1482 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1483 		iwm_set_pwr(sc);
1484 
1485 	iwm_mvm_nic_config(sc);
1486 
1487 	if ((error = iwm_nic_rx_init(sc)) != 0)
1488 		return error;
1489 
1490 	/*
1491 	 * Ditto for TX, from iwn
1492 	 */
1493 	if ((error = iwm_nic_tx_init(sc)) != 0)
1494 		return error;
1495 
1496 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1497 	    "%s: shadow registers enabled\n", __func__);
1498 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1499 
1500 	return 0;
1501 }
1502 
1503 int
1504 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1505 {
1506 	if (!iwm_nic_lock(sc)) {
1507 		device_printf(sc->sc_dev,
1508 		    "%s: cannot enable txq %d\n",
1509 		    __func__,
1510 		    qid);
1511 		return EBUSY;
1512 	}
1513 
1514 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1515 
1516 	if (qid == IWM_MVM_CMD_QUEUE) {
1517 		/* unactivate before configuration */
1518 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1519 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1520 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1521 
1522 		iwm_nic_unlock(sc);
1523 
1524 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1525 
1526 		if (!iwm_nic_lock(sc)) {
1527 			device_printf(sc->sc_dev,
1528 			    "%s: cannot enable txq %d\n", __func__, qid);
1529 			return EBUSY;
1530 		}
1531 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1532 		iwm_nic_unlock(sc);
1533 
1534 		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1535 		/* Set scheduler window size and frame limit. */
1536 		iwm_write_mem32(sc,
1537 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1538 		    sizeof(uint32_t),
1539 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1540 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1541 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1542 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1543 
1544 		if (!iwm_nic_lock(sc)) {
1545 			device_printf(sc->sc_dev,
1546 			    "%s: cannot enable txq %d\n", __func__, qid);
1547 			return EBUSY;
1548 		}
1549 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1550 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1551 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1552 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1553 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1554 	} else {
1555 		struct iwm_scd_txq_cfg_cmd cmd;
1556 		int error;
1557 
1558 		iwm_nic_unlock(sc);
1559 
1560 		memset(&cmd, 0, sizeof(cmd));
1561 		cmd.scd_queue = qid;
1562 		cmd.enable = 1;
1563 		cmd.sta_id = sta_id;
1564 		cmd.tx_fifo = fifo;
1565 		cmd.aggregate = 0;
1566 		cmd.window = IWM_FRAME_LIMIT;
1567 
1568 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1569 		    sizeof(cmd), &cmd);
1570 		if (error) {
1571 			device_printf(sc->sc_dev,
1572 			    "cannot enable txq %d\n", qid);
1573 			return error;
1574 		}
1575 
1576 		if (!iwm_nic_lock(sc))
1577 			return EBUSY;
1578 	}
1579 
1580 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1581 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1582 
1583 	iwm_nic_unlock(sc);
1584 
1585 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1586 	    __func__, qid, fifo);
1587 
1588 	return 0;
1589 }
1590 
1591 static int
1592 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1593 {
1594 	int error, chnl;
1595 
1596 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1597 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1598 
1599 	if (!iwm_nic_lock(sc))
1600 		return EBUSY;
1601 
1602 	iwm_ict_reset(sc);
1603 
1604 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1605 	if (scd_base_addr != 0 &&
1606 	    scd_base_addr != sc->scd_base_addr) {
1607 		device_printf(sc->sc_dev,
1608 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1609 		    __func__, sc->scd_base_addr, scd_base_addr);
1610 	}
1611 
1612 	iwm_nic_unlock(sc);
1613 
1614 	/* reset context data, TX status and translation data */
1615 	error = iwm_write_mem(sc,
1616 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1617 	    NULL, clear_dwords);
1618 	if (error)
1619 		return EBUSY;
1620 
1621 	if (!iwm_nic_lock(sc))
1622 		return EBUSY;
1623 
1624 	/* Set physical address of TX scheduler rings (1KB aligned). */
1625 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1626 
1627 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1628 
1629 	iwm_nic_unlock(sc);
1630 
1631 	/* enable command channel */
1632 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1633 	if (error)
1634 		return error;
1635 
1636 	if (!iwm_nic_lock(sc))
1637 		return EBUSY;
1638 
1639 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1640 
1641 	/* Enable DMA channels. */
1642 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1643 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1644 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1645 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1646 	}
1647 
1648 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1649 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1650 
1651 	iwm_nic_unlock(sc);
1652 
1653 	/* Enable L1-Active */
1654 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1655 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1656 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1657 	}
1658 
1659 	return error;
1660 }
1661 
1662 /*
1663  * NVM read access and content parsing.  We do not support
1664  * external NVM or writing NVM.
1665  * iwlwifi/mvm/nvm.c
1666  */
1667 
1668 /* Default NVM size to read */
1669 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1670 
1671 #define IWM_NVM_WRITE_OPCODE 1
1672 #define IWM_NVM_READ_OPCODE 0
1673 
1674 /* load nvm chunk response */
1675 enum {
1676 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1677 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1678 };
1679 
1680 static int
1681 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1682 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1683 {
1684 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1685 		.offset = htole16(offset),
1686 		.length = htole16(length),
1687 		.type = htole16(section),
1688 		.op_code = IWM_NVM_READ_OPCODE,
1689 	};
1690 	struct iwm_nvm_access_resp *nvm_resp;
1691 	struct iwm_rx_packet *pkt;
1692 	struct iwm_host_cmd cmd = {
1693 		.id = IWM_NVM_ACCESS_CMD,
1694 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1695 		.data = { &nvm_access_cmd, },
1696 	};
1697 	int ret, bytes_read, offset_read;
1698 	uint8_t *resp_data;
1699 
1700 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1701 
1702 	ret = iwm_send_cmd(sc, &cmd);
1703 	if (ret) {
1704 		device_printf(sc->sc_dev,
1705 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1706 		return ret;
1707 	}
1708 
1709 	pkt = cmd.resp_pkt;
1710 
1711 	/* Extract NVM response */
1712 	nvm_resp = (void *)pkt->data;
1713 	ret = le16toh(nvm_resp->status);
1714 	bytes_read = le16toh(nvm_resp->length);
1715 	offset_read = le16toh(nvm_resp->offset);
1716 	resp_data = nvm_resp->data;
1717 	if (ret) {
1718 		if ((offset != 0) &&
1719 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1720 			/*
1721 			 * meaning of NOT_VALID_ADDRESS:
1722 			 * driver try to read chunk from address that is
1723 			 * multiple of 2K and got an error since addr is empty.
1724 			 * meaning of (offset != 0): driver already
1725 			 * read valid data from another chunk so this case
1726 			 * is not an error.
1727 			 */
1728 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1729 				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1730 				    offset);
1731 			*len = 0;
1732 			ret = 0;
1733 		} else {
1734 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1735 				    "NVM access command failed with status %d\n", ret);
1736 			ret = EIO;
1737 		}
1738 		goto exit;
1739 	}
1740 
1741 	if (offset_read != offset) {
1742 		device_printf(sc->sc_dev,
1743 		    "NVM ACCESS response with invalid offset %d\n",
1744 		    offset_read);
1745 		ret = EINVAL;
1746 		goto exit;
1747 	}
1748 
1749 	if (bytes_read > length) {
1750 		device_printf(sc->sc_dev,
1751 		    "NVM ACCESS response with too much data "
1752 		    "(%d bytes requested, %d bytes received)\n",
1753 		    length, bytes_read);
1754 		ret = EINVAL;
1755 		goto exit;
1756 	}
1757 
1758 	/* Write data to NVM */
1759 	memcpy(data + offset, resp_data, bytes_read);
1760 	*len = bytes_read;
1761 
1762  exit:
1763 	iwm_free_resp(sc, &cmd);
1764 	return ret;
1765 }
1766 
1767 /*
1768  * Reads an NVM section completely.
1769  * NICs prior to 7000 family don't have a real NVM, but just read
1770  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1771  * by uCode, we need to manually check in this case that we don't
1772  * overflow and try to read more than the EEPROM size.
1773  * For 7000 family NICs, we supply the maximal size we can read, and
1774  * the uCode fills the response with as much data as we can,
1775  * without overflowing, so no check is needed.
1776  */
1777 static int
1778 iwm_nvm_read_section(struct iwm_softc *sc,
1779 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1780 {
1781 	uint16_t seglen, length, offset = 0;
1782 	int ret;
1783 
1784 	/* Set nvm section read length */
1785 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1786 
1787 	seglen = length;
1788 
1789 	/* Read the NVM until exhausted (reading less than requested) */
1790 	while (seglen == length) {
1791 		/* Check no memory assumptions fail and cause an overflow */
1792 		if ((size_read + offset + length) >
1793 		    sc->cfg->eeprom_size) {
1794 			device_printf(sc->sc_dev,
1795 			    "EEPROM size is too small for NVM\n");
1796 			return ENOBUFS;
1797 		}
1798 
1799 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1800 		if (ret) {
1801 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1802 				    "Cannot read NVM from section %d offset %d, length %d\n",
1803 				    section, offset, length);
1804 			return ret;
1805 		}
1806 		offset += seglen;
1807 	}
1808 
1809 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1810 		    "NVM section %d read completed\n", section);
1811 	*len = offset;
1812 	return 0;
1813 }
1814 
1815 /*
1816  * BEGIN IWM_NVM_PARSE
1817  */
1818 
1819 /* iwlwifi/iwl-nvm-parse.c */
1820 
1821 /* NVM offsets (in words) definitions */
1822 enum iwm_nvm_offsets {
1823 	/* NVM HW-Section offset (in words) definitions */
1824 	IWM_HW_ADDR = 0x15,
1825 
1826 /* NVM SW-Section offset (in words) definitions */
1827 	IWM_NVM_SW_SECTION = 0x1C0,
1828 	IWM_NVM_VERSION = 0,
1829 	IWM_RADIO_CFG = 1,
1830 	IWM_SKU = 2,
1831 	IWM_N_HW_ADDRS = 3,
1832 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1833 
1834 /* NVM calibration section offset (in words) definitions */
1835 	IWM_NVM_CALIB_SECTION = 0x2B8,
1836 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1837 };
1838 
1839 enum iwm_8000_nvm_offsets {
1840 	/* NVM HW-Section offset (in words) definitions */
1841 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1842 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1843 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1844 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1845 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1846 
1847 	/* NVM SW-Section offset (in words) definitions */
1848 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1849 	IWM_NVM_VERSION_8000 = 0,
1850 	IWM_RADIO_CFG_8000 = 0,
1851 	IWM_SKU_8000 = 2,
1852 	IWM_N_HW_ADDRS_8000 = 3,
1853 
1854 	/* NVM REGULATORY -Section offset (in words) definitions */
1855 	IWM_NVM_CHANNELS_8000 = 0,
1856 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1857 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1858 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1859 
1860 	/* NVM calibration section offset (in words) definitions */
1861 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1862 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1863 };
1864 
1865 /* SKU Capabilities (actual values from NVM definition) */
1866 enum nvm_sku_bits {
1867 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1868 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1869 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1870 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1871 };
1872 
1873 /* radio config bits (actual values from NVM definition) */
1874 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1875 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1876 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1877 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1878 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1879 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1880 
1881 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1882 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1883 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1884 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1885 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1886 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1887 
1888 /**
1889  * enum iwm_nvm_channel_flags - channel flags in NVM
1890  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1891  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1892  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1893  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1894  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1895  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1896  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1897  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1898  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1899  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1900  */
1901 enum iwm_nvm_channel_flags {
1902 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1903 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1904 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1905 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1906 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1907 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1908 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1909 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1910 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1911 };
1912 
1913 /*
1914  * Translate EEPROM flags to net80211.
1915  */
1916 static uint32_t
1917 iwm_eeprom_channel_flags(uint16_t ch_flags)
1918 {
1919 	uint32_t nflags;
1920 
1921 	nflags = 0;
1922 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1923 		nflags |= IEEE80211_CHAN_PASSIVE;
1924 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1925 		nflags |= IEEE80211_CHAN_NOADHOC;
1926 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1927 		nflags |= IEEE80211_CHAN_DFS;
1928 		/* Just in case. */
1929 		nflags |= IEEE80211_CHAN_NOADHOC;
1930 	}
1931 
1932 	return (nflags);
1933 }
1934 
1935 static void
1936 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1937     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1938     const uint8_t bands[])
1939 {
1940 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1941 	uint32_t nflags;
1942 	uint16_t ch_flags;
1943 	uint8_t ieee;
1944 	int error;
1945 
1946 	for (; ch_idx < ch_num; ch_idx++) {
1947 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1948 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1949 			ieee = iwm_nvm_channels[ch_idx];
1950 		else
1951 			ieee = iwm_nvm_channels_8000[ch_idx];
1952 
1953 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1954 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1955 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1956 			    ieee, ch_flags,
1957 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1958 			    "5.2" : "2.4");
1959 			continue;
1960 		}
1961 
1962 		nflags = iwm_eeprom_channel_flags(ch_flags);
1963 		error = ieee80211_add_channel(chans, maxchans, nchans,
1964 		    ieee, 0, 0, nflags, bands);
1965 		if (error != 0)
1966 			break;
1967 
1968 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1969 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1970 		    ieee, ch_flags,
1971 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1972 		    "5.2" : "2.4");
1973 	}
1974 }
1975 
1976 static void
1977 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1978     struct ieee80211_channel chans[])
1979 {
1980 	struct iwm_softc *sc = ic->ic_softc;
1981 	struct iwm_nvm_data *data = sc->nvm_data;
1982 	uint8_t bands[IEEE80211_MODE_BYTES];
1983 	size_t ch_num;
1984 
1985 	memset(bands, 0, sizeof(bands));
1986 	/* 1-13: 11b/g channels. */
1987 	setbit(bands, IEEE80211_MODE_11B);
1988 	setbit(bands, IEEE80211_MODE_11G);
1989 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1990 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
1991 
1992 	/* 14: 11b channel only. */
1993 	clrbit(bands, IEEE80211_MODE_11G);
1994 	iwm_add_channel_band(sc, chans, maxchans, nchans,
1995 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1996 
1997 	if (data->sku_cap_band_52GHz_enable) {
1998 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1999 			ch_num = nitems(iwm_nvm_channels);
2000 		else
2001 			ch_num = nitems(iwm_nvm_channels_8000);
2002 		memset(bands, 0, sizeof(bands));
2003 		setbit(bands, IEEE80211_MODE_11A);
2004 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2005 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2006 	}
2007 }
2008 
2009 static void
2010 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2011 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2012 {
2013 	const uint8_t *hw_addr;
2014 
2015 	if (mac_override) {
2016 		static const uint8_t reserved_mac[] = {
2017 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2018 		};
2019 
2020 		hw_addr = (const uint8_t *)(mac_override +
2021 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2022 
2023 		/*
2024 		 * Store the MAC address from MAO section.
2025 		 * No byte swapping is required in MAO section
2026 		 */
2027 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2028 
2029 		/*
2030 		 * Force the use of the OTP MAC address in case of reserved MAC
2031 		 * address in the NVM, or if address is given but invalid.
2032 		 */
2033 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2034 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2035 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2036 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2037 			return;
2038 
2039 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2040 		    "%s: mac address from nvm override section invalid\n",
2041 		    __func__);
2042 	}
2043 
2044 	if (nvm_hw) {
2045 		/* read the mac address from WFMP registers */
2046 		uint32_t mac_addr0 =
2047 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2048 		uint32_t mac_addr1 =
2049 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2050 
2051 		hw_addr = (const uint8_t *)&mac_addr0;
2052 		data->hw_addr[0] = hw_addr[3];
2053 		data->hw_addr[1] = hw_addr[2];
2054 		data->hw_addr[2] = hw_addr[1];
2055 		data->hw_addr[3] = hw_addr[0];
2056 
2057 		hw_addr = (const uint8_t *)&mac_addr1;
2058 		data->hw_addr[4] = hw_addr[1];
2059 		data->hw_addr[5] = hw_addr[0];
2060 
2061 		return;
2062 	}
2063 
2064 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2065 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2066 }
2067 
2068 static int
2069 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2070 	    const uint16_t *phy_sku)
2071 {
2072 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2073 		return le16_to_cpup(nvm_sw + IWM_SKU);
2074 
2075 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2076 }
2077 
2078 static int
2079 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2080 {
2081 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2082 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2083 	else
2084 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2085 						IWM_NVM_VERSION_8000));
2086 }
2087 
2088 static int
2089 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2090 		  const uint16_t *phy_sku)
2091 {
2092         if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2093                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2094 
2095         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2096 }
2097 
2098 static int
2099 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2100 {
2101 	int n_hw_addr;
2102 
2103 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2104 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2105 
2106 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2107 
2108         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2109 }
2110 
2111 static void
2112 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2113 		  uint32_t radio_cfg)
2114 {
2115 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2116 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2117 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2118 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2119 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2120 		return;
2121 	}
2122 
2123 	/* set the radio configuration for family 8000 */
2124 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2125 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2126 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2127 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2128 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2129 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2130 }
2131 
2132 static int
2133 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2134 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2135 {
2136 #ifdef notyet /* for FAMILY 9000 */
2137 	if (cfg->mac_addr_from_csr) {
2138 		iwm_set_hw_address_from_csr(sc, data);
2139         } else
2140 #endif
2141 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2142 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2143 
2144 		/* The byte order is little endian 16 bit, meaning 214365 */
2145 		data->hw_addr[0] = hw_addr[1];
2146 		data->hw_addr[1] = hw_addr[0];
2147 		data->hw_addr[2] = hw_addr[3];
2148 		data->hw_addr[3] = hw_addr[2];
2149 		data->hw_addr[4] = hw_addr[5];
2150 		data->hw_addr[5] = hw_addr[4];
2151 	} else {
2152 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2153 	}
2154 
2155 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2156 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2157 		return EINVAL;
2158 	}
2159 
2160 	return 0;
2161 }
2162 
2163 static struct iwm_nvm_data *
2164 iwm_parse_nvm_data(struct iwm_softc *sc,
2165 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2166 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2167 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2168 {
2169 	struct iwm_nvm_data *data;
2170 	uint32_t sku, radio_cfg;
2171 	uint16_t lar_config;
2172 
2173 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2174 		data = malloc(sizeof(*data) +
2175 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2176 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2177 	} else {
2178 		data = malloc(sizeof(*data) +
2179 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2180 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2181 	}
2182 	if (!data)
2183 		return NULL;
2184 
2185 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2186 
2187 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2188 	iwm_set_radio_cfg(sc, data, radio_cfg);
2189 
2190 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2191 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2192 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2193 	data->sku_cap_11n_enable = 0;
2194 
2195 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2196 
2197 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2198 		/* TODO: use IWL_NVM_EXT */
2199 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2200 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2201 				       IWM_NVM_LAR_OFFSET_8000;
2202 
2203 		lar_config = le16_to_cpup(regulatory + lar_offset);
2204 		data->lar_enabled = !!(lar_config &
2205 				       IWM_NVM_LAR_ENABLED_8000);
2206 	}
2207 
2208 	/* If no valid mac address was found - bail out */
2209 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2210 		free(data, M_DEVBUF);
2211 		return NULL;
2212 	}
2213 
2214 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2215 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2216 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2217 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2218 	} else {
2219 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2220 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2221 	}
2222 
2223 	return data;
2224 }
2225 
2226 static void
2227 iwm_free_nvm_data(struct iwm_nvm_data *data)
2228 {
2229 	if (data != NULL)
2230 		free(data, M_DEVBUF);
2231 }
2232 
2233 static struct iwm_nvm_data *
2234 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2235 {
2236 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2237 
2238 	/* Checking for required sections */
2239 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2240 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2241 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2242 			device_printf(sc->sc_dev,
2243 			    "Can't parse empty OTP/NVM sections\n");
2244 			return NULL;
2245 		}
2246 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2247 		/* SW and REGULATORY sections are mandatory */
2248 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2249 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2250 			device_printf(sc->sc_dev,
2251 			    "Can't parse empty OTP/NVM sections\n");
2252 			return NULL;
2253 		}
2254 		/* MAC_OVERRIDE or at least HW section must exist */
2255 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2256 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2257 			device_printf(sc->sc_dev,
2258 			    "Can't parse mac_address, empty sections\n");
2259 			return NULL;
2260 		}
2261 
2262 		/* PHY_SKU section is mandatory in B0 */
2263 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2264 			device_printf(sc->sc_dev,
2265 			    "Can't parse phy_sku in B0, empty sections\n");
2266 			return NULL;
2267 		}
2268 	} else {
2269 		panic("unknown device family %d\n", sc->cfg->device_family);
2270 	}
2271 
2272 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2273 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2274 	calib = (const uint16_t *)
2275 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2276 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2277 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2278 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2279 	mac_override = (const uint16_t *)
2280 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2281 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2282 
2283 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2284 	    phy_sku, regulatory);
2285 }
2286 
2287 static int
2288 iwm_nvm_init(struct iwm_softc *sc)
2289 {
2290 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2291 	int i, ret, section;
2292 	uint32_t size_read = 0;
2293 	uint8_t *nvm_buffer, *temp;
2294 	uint16_t len;
2295 
2296 	memset(nvm_sections, 0, sizeof(nvm_sections));
2297 
2298 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2299 		return EINVAL;
2300 
2301 	/* load NVM values from nic */
2302 	/* Read From FW NVM */
2303 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2304 
2305 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2306 	if (!nvm_buffer)
2307 		return ENOMEM;
2308 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2309 		/* we override the constness for initial read */
2310 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2311 					   &len, size_read);
2312 		if (ret)
2313 			continue;
2314 		size_read += len;
2315 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2316 		if (!temp) {
2317 			ret = ENOMEM;
2318 			break;
2319 		}
2320 		memcpy(temp, nvm_buffer, len);
2321 
2322 		nvm_sections[section].data = temp;
2323 		nvm_sections[section].length = len;
2324 	}
2325 	if (!size_read)
2326 		device_printf(sc->sc_dev, "OTP is blank\n");
2327 	free(nvm_buffer, M_DEVBUF);
2328 
2329 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2330 	if (!sc->nvm_data)
2331 		return EINVAL;
2332 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2333 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2334 
2335 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2336 		if (nvm_sections[i].data != NULL)
2337 			free(nvm_sections[i].data, M_DEVBUF);
2338 	}
2339 
2340 	return 0;
2341 }
2342 
2343 static int
2344 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2345 	const struct iwm_fw_desc *section)
2346 {
2347 	struct iwm_dma_info *dma = &sc->fw_dma;
2348 	uint8_t *v_addr;
2349 	bus_addr_t p_addr;
2350 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2351 	int ret = 0;
2352 
2353 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2354 		    "%s: [%d] uCode section being loaded...\n",
2355 		    __func__, section_num);
2356 
2357 	v_addr = dma->vaddr;
2358 	p_addr = dma->paddr;
2359 
2360 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2361 		uint32_t copy_size, dst_addr;
2362 		int extended_addr = FALSE;
2363 
2364 		copy_size = MIN(chunk_sz, section->len - offset);
2365 		dst_addr = section->offset + offset;
2366 
2367 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2368 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2369 			extended_addr = TRUE;
2370 
2371 		if (extended_addr)
2372 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2373 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2374 
2375 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2376 		    copy_size);
2377 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2378 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2379 						   copy_size);
2380 
2381 		if (extended_addr)
2382 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2383 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2384 
2385 		if (ret) {
2386 			device_printf(sc->sc_dev,
2387 			    "%s: Could not load the [%d] uCode section\n",
2388 			    __func__, section_num);
2389 			break;
2390 		}
2391 	}
2392 
2393 	return ret;
2394 }
2395 
2396 /*
2397  * ucode
2398  */
2399 static int
2400 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2401 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2402 {
2403 	sc->sc_fw_chunk_done = 0;
2404 
2405 	if (!iwm_nic_lock(sc))
2406 		return EBUSY;
2407 
2408 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2409 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2410 
2411 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2412 	    dst_addr);
2413 
2414 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2415 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2416 
2417 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2418 	    (iwm_get_dma_hi_addr(phy_addr)
2419 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2420 
2421 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2422 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2423 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2424 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2425 
2426 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2427 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2428 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2429 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2430 
2431 	iwm_nic_unlock(sc);
2432 
2433 	/* wait up to 5s for this segment to load */
2434 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2435 
2436 	if (!sc->sc_fw_chunk_done) {
2437 		device_printf(sc->sc_dev,
2438 		    "fw chunk addr 0x%x len %d failed to load\n",
2439 		    dst_addr, byte_cnt);
2440 		return ETIMEDOUT;
2441 	}
2442 
2443 	return 0;
2444 }
2445 
2446 static int
2447 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2448 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2449 {
2450 	int shift_param;
2451 	int i, ret = 0, sec_num = 0x1;
2452 	uint32_t val, last_read_idx = 0;
2453 
2454 	if (cpu == 1) {
2455 		shift_param = 0;
2456 		*first_ucode_section = 0;
2457 	} else {
2458 		shift_param = 16;
2459 		(*first_ucode_section)++;
2460 	}
2461 
2462 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2463 		last_read_idx = i;
2464 
2465 		/*
2466 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2467 		 * CPU1 to CPU2.
2468 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2469 		 * CPU2 non paged to CPU2 paging sec.
2470 		 */
2471 		if (!image->sec[i].data ||
2472 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2473 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2474 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2475 				    "Break since Data not valid or Empty section, sec = %d\n",
2476 				    i);
2477 			break;
2478 		}
2479 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2480 		if (ret)
2481 			return ret;
2482 
2483 		/* Notify the ucode of the loaded section number and status */
2484 		if (iwm_nic_lock(sc)) {
2485 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2486 			val = val | (sec_num << shift_param);
2487 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2488 			sec_num = (sec_num << 1) | 0x1;
2489 			iwm_nic_unlock(sc);
2490 		}
2491 	}
2492 
2493 	*first_ucode_section = last_read_idx;
2494 
2495 	iwm_enable_interrupts(sc);
2496 
2497 	if (iwm_nic_lock(sc)) {
2498 		if (cpu == 1)
2499 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2500 		else
2501 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2502 		iwm_nic_unlock(sc);
2503 	}
2504 
2505 	return 0;
2506 }
2507 
2508 static int
2509 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2510 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2511 {
2512 	int shift_param;
2513 	int i, ret = 0;
2514 	uint32_t last_read_idx = 0;
2515 
2516 	if (cpu == 1) {
2517 		shift_param = 0;
2518 		*first_ucode_section = 0;
2519 	} else {
2520 		shift_param = 16;
2521 		(*first_ucode_section)++;
2522 	}
2523 
2524 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2525 		last_read_idx = i;
2526 
2527 		/*
2528 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2529 		 * CPU1 to CPU2.
2530 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2531 		 * CPU2 non paged to CPU2 paging sec.
2532 		 */
2533 		if (!image->sec[i].data ||
2534 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2535 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2536 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2537 				    "Break since Data not valid or Empty section, sec = %d\n",
2538 				     i);
2539 			break;
2540 		}
2541 
2542 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2543 		if (ret)
2544 			return ret;
2545 	}
2546 
2547 	*first_ucode_section = last_read_idx;
2548 
2549 	return 0;
2550 
2551 }
2552 
2553 static int
2554 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2555 {
2556 	int ret = 0;
2557 	int first_ucode_section;
2558 
2559 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2560 		     image->is_dual_cpus ? "Dual" : "Single");
2561 
2562 	/* load to FW the binary non secured sections of CPU1 */
2563 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2564 	if (ret)
2565 		return ret;
2566 
2567 	if (image->is_dual_cpus) {
2568 		/* set CPU2 header address */
2569 		if (iwm_nic_lock(sc)) {
2570 			iwm_write_prph(sc,
2571 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2572 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2573 			iwm_nic_unlock(sc);
2574 		}
2575 
2576 		/* load to FW the binary sections of CPU2 */
2577 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2578 						 &first_ucode_section);
2579 		if (ret)
2580 			return ret;
2581 	}
2582 
2583 	iwm_enable_interrupts(sc);
2584 
2585 	/* release CPU reset */
2586 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2587 
2588 	return 0;
2589 }
2590 
2591 int
2592 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2593 	const struct iwm_fw_img *image)
2594 {
2595 	int ret = 0;
2596 	int first_ucode_section;
2597 
2598 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2599 		    image->is_dual_cpus ? "Dual" : "Single");
2600 
2601 	/* configure the ucode to be ready to get the secured image */
2602 	/* release CPU reset */
2603 	if (iwm_nic_lock(sc)) {
2604 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2605 		    IWM_RELEASE_CPU_RESET_BIT);
2606 		iwm_nic_unlock(sc);
2607 	}
2608 
2609 	/* load to FW the binary Secured sections of CPU1 */
2610 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2611 	    &first_ucode_section);
2612 	if (ret)
2613 		return ret;
2614 
2615 	/* load to FW the binary sections of CPU2 */
2616 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2617 	    &first_ucode_section);
2618 }
2619 
2620 /* XXX Get rid of this definition */
2621 static inline void
2622 iwm_enable_fw_load_int(struct iwm_softc *sc)
2623 {
2624 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2625 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2626 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2627 }
2628 
2629 /* XXX Add proper rfkill support code */
2630 static int
2631 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2632 {
2633 	int ret;
2634 
2635 	/* This may fail if AMT took ownership of the device */
2636 	if (iwm_prepare_card_hw(sc)) {
2637 		device_printf(sc->sc_dev,
2638 		    "%s: Exit HW not ready\n", __func__);
2639 		ret = EIO;
2640 		goto out;
2641 	}
2642 
2643 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2644 
2645 	iwm_disable_interrupts(sc);
2646 
2647 	/* make sure rfkill handshake bits are cleared */
2648 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2649 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2650 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2651 
2652 	/* clear (again), then enable host interrupts */
2653 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2654 
2655 	ret = iwm_nic_init(sc);
2656 	if (ret) {
2657 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2658 		goto out;
2659 	}
2660 
2661 	/*
2662 	 * Now, we load the firmware and don't want to be interrupted, even
2663 	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2664 	 * FH_TX interrupt which is needed to load the firmware). If the
2665 	 * RF-Kill switch is toggled, we will find out after having loaded
2666 	 * the firmware and return the proper value to the caller.
2667 	 */
2668 	iwm_enable_fw_load_int(sc);
2669 
2670 	/* really make sure rfkill handshake bits are cleared */
2671 	/* maybe we should write a few times more?  just to make sure */
2672 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2673 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2674 
2675 	/* Load the given image to the HW */
2676 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2677 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2678 	else
2679 		ret = iwm_pcie_load_given_ucode(sc, fw);
2680 
2681 	/* XXX re-check RF-Kill state */
2682 
2683 out:
2684 	return ret;
2685 }
2686 
2687 static int
2688 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2689 {
2690 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2691 		.valid = htole32(valid_tx_ant),
2692 	};
2693 
2694 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2695 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2696 }
2697 
2698 /* iwlwifi: mvm/fw.c */
2699 static int
2700 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2701 {
2702 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2703 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2704 
2705 	/* Set parameters */
2706 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2707 	phy_cfg_cmd.calib_control.event_trigger =
2708 	    sc->sc_default_calib[ucode_type].event_trigger;
2709 	phy_cfg_cmd.calib_control.flow_trigger =
2710 	    sc->sc_default_calib[ucode_type].flow_trigger;
2711 
2712 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2713 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2714 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2715 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2716 }
2717 
2718 static int
2719 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2720 {
2721 	struct iwm_mvm_alive_data *alive_data = data;
2722 	struct iwm_mvm_alive_resp_v3 *palive3;
2723 	struct iwm_mvm_alive_resp *palive;
2724 	struct iwm_umac_alive *umac;
2725 	struct iwm_lmac_alive *lmac1;
2726 	struct iwm_lmac_alive *lmac2 = NULL;
2727 	uint16_t status;
2728 
2729 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2730 		palive = (void *)pkt->data;
2731 		umac = &palive->umac_data;
2732 		lmac1 = &palive->lmac_data[0];
2733 		lmac2 = &palive->lmac_data[1];
2734 		status = le16toh(palive->status);
2735 	} else {
2736 		palive3 = (void *)pkt->data;
2737 		umac = &palive3->umac_data;
2738 		lmac1 = &palive3->lmac_data;
2739 		status = le16toh(palive3->status);
2740 	}
2741 
2742 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2743 	if (lmac2)
2744 		sc->error_event_table[1] =
2745 			le32toh(lmac2->error_event_table_ptr);
2746 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2747 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2748 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2749 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2750 	if (sc->umac_error_event_table)
2751 		sc->support_umac_log = TRUE;
2752 
2753 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2754 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2755 		    status, lmac1->ver_type, lmac1->ver_subtype);
2756 
2757 	if (lmac2)
2758 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2759 
2760 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2761 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2762 		    le32toh(umac->umac_major),
2763 		    le32toh(umac->umac_minor));
2764 
2765 	return TRUE;
2766 }
2767 
2768 static int
2769 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2770 	struct iwm_rx_packet *pkt, void *data)
2771 {
2772 	struct iwm_phy_db *phy_db = data;
2773 
2774 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2775 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2776 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2777 			    __func__, pkt->hdr.code);
2778 		}
2779 		return TRUE;
2780 	}
2781 
2782 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2783 		device_printf(sc->sc_dev,
2784 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2785 	}
2786 
2787 	return FALSE;
2788 }
2789 
2790 static int
2791 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2792 	enum iwm_ucode_type ucode_type)
2793 {
2794 	struct iwm_notification_wait alive_wait;
2795 	struct iwm_mvm_alive_data alive_data;
2796 	const struct iwm_fw_img *fw;
2797 	enum iwm_ucode_type old_type = sc->cur_ucode;
2798 	int error;
2799 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2800 
2801 	fw = &sc->sc_fw.img[ucode_type];
2802 	sc->cur_ucode = ucode_type;
2803 	sc->ucode_loaded = FALSE;
2804 
2805 	memset(&alive_data, 0, sizeof(alive_data));
2806 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2807 				   alive_cmd, nitems(alive_cmd),
2808 				   iwm_alive_fn, &alive_data);
2809 
2810 	error = iwm_start_fw(sc, fw);
2811 	if (error) {
2812 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2813 		sc->cur_ucode = old_type;
2814 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2815 		return error;
2816 	}
2817 
2818 	/*
2819 	 * Some things may run in the background now, but we
2820 	 * just wait for the ALIVE notification here.
2821 	 */
2822 	IWM_UNLOCK(sc);
2823 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2824 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2825 	IWM_LOCK(sc);
2826 	if (error) {
2827 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2828 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2829 			if (iwm_nic_lock(sc)) {
2830 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2831 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2832 				iwm_nic_unlock(sc);
2833 			}
2834 			device_printf(sc->sc_dev,
2835 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2836 			    a, b);
2837 		}
2838 		sc->cur_ucode = old_type;
2839 		return error;
2840 	}
2841 
2842 	if (!alive_data.valid) {
2843 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2844 		    __func__);
2845 		sc->cur_ucode = old_type;
2846 		return EIO;
2847 	}
2848 
2849 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2850 
2851 	/*
2852 	 * configure and operate fw paging mechanism.
2853 	 * driver configures the paging flow only once, CPU2 paging image
2854 	 * included in the IWM_UCODE_INIT image.
2855 	 */
2856 	if (fw->paging_mem_size) {
2857 		error = iwm_save_fw_paging(sc, fw);
2858 		if (error) {
2859 			device_printf(sc->sc_dev,
2860 			    "%s: failed to save the FW paging image\n",
2861 			    __func__);
2862 			return error;
2863 		}
2864 
2865 		error = iwm_send_paging_cmd(sc, fw);
2866 		if (error) {
2867 			device_printf(sc->sc_dev,
2868 			    "%s: failed to send the paging cmd\n", __func__);
2869 			iwm_free_fw_paging(sc);
2870 			return error;
2871 		}
2872 	}
2873 
2874 	if (!error)
2875 		sc->ucode_loaded = TRUE;
2876 	return error;
2877 }
2878 
2879 /*
2880  * mvm misc bits
2881  */
2882 
2883 /*
2884  * follows iwlwifi/fw.c
2885  */
2886 static int
2887 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2888 {
2889 	struct iwm_notification_wait calib_wait;
2890 	static const uint16_t init_complete[] = {
2891 		IWM_INIT_COMPLETE_NOTIF,
2892 		IWM_CALIB_RES_NOTIF_PHY_DB
2893 	};
2894 	int ret;
2895 
2896 	/* do not operate with rfkill switch turned on */
2897 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2898 		device_printf(sc->sc_dev,
2899 		    "radio is disabled by hardware switch\n");
2900 		return EPERM;
2901 	}
2902 
2903 	iwm_init_notification_wait(sc->sc_notif_wait,
2904 				   &calib_wait,
2905 				   init_complete,
2906 				   nitems(init_complete),
2907 				   iwm_wait_phy_db_entry,
2908 				   sc->sc_phy_db);
2909 
2910 	/* Will also start the device */
2911 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2912 	if (ret) {
2913 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2914 		    ret);
2915 		goto error;
2916 	}
2917 
2918 	if (justnvm) {
2919 		/* Read nvm */
2920 		ret = iwm_nvm_init(sc);
2921 		if (ret) {
2922 			device_printf(sc->sc_dev, "failed to read nvm\n");
2923 			goto error;
2924 		}
2925 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2926 		goto error;
2927 	}
2928 
2929 	ret = iwm_send_bt_init_conf(sc);
2930 	if (ret) {
2931 		device_printf(sc->sc_dev,
2932 		    "failed to send bt coex configuration: %d\n", ret);
2933 		goto error;
2934 	}
2935 
2936 	/* Send TX valid antennas before triggering calibrations */
2937 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2938 	if (ret) {
2939 		device_printf(sc->sc_dev,
2940 		    "failed to send antennas before calibration: %d\n", ret);
2941 		goto error;
2942 	}
2943 
2944 	/*
2945 	 * Send phy configurations command to init uCode
2946 	 * to start the 16.0 uCode init image internal calibrations.
2947 	 */
2948 	ret = iwm_send_phy_cfg_cmd(sc);
2949 	if (ret) {
2950 		device_printf(sc->sc_dev,
2951 		    "%s: Failed to run INIT calibrations: %d\n",
2952 		    __func__, ret);
2953 		goto error;
2954 	}
2955 
2956 	/*
2957 	 * Nothing to do but wait for the init complete notification
2958 	 * from the firmware.
2959 	 */
2960 	IWM_UNLOCK(sc);
2961 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2962 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
2963 	IWM_LOCK(sc);
2964 
2965 
2966 	goto out;
2967 
2968 error:
2969 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2970 out:
2971 	return ret;
2972 }
2973 
2974 static int
2975 iwm_mvm_config_ltr(struct iwm_softc *sc)
2976 {
2977 	struct iwm_ltr_config_cmd cmd = {
2978 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
2979 	};
2980 
2981 	if (!sc->sc_ltr_enabled)
2982 		return 0;
2983 
2984 	return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
2985 }
2986 
2987 /*
2988  * receive side
2989  */
2990 
2991 /* (re)stock rx ring, called at init-time and at runtime */
2992 static int
2993 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2994 {
2995 	struct iwm_rx_ring *ring = &sc->rxq;
2996 	struct iwm_rx_data *data = &ring->data[idx];
2997 	struct mbuf *m;
2998 	bus_dmamap_t dmamap;
2999 	bus_dma_segment_t seg;
3000 	int nsegs, error;
3001 
3002 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3003 	if (m == NULL)
3004 		return ENOBUFS;
3005 
3006 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3007 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3008 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3009 	if (error != 0) {
3010 		device_printf(sc->sc_dev,
3011 		    "%s: can't map mbuf, error %d\n", __func__, error);
3012 		m_freem(m);
3013 		return error;
3014 	}
3015 
3016 	if (data->m != NULL)
3017 		bus_dmamap_unload(ring->data_dmat, data->map);
3018 
3019 	/* Swap ring->spare_map with data->map */
3020 	dmamap = data->map;
3021 	data->map = ring->spare_map;
3022 	ring->spare_map = dmamap;
3023 
3024 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3025 	data->m = m;
3026 
3027 	/* Update RX descriptor. */
3028 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3029 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3030 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3031 	    BUS_DMASYNC_PREWRITE);
3032 
3033 	return 0;
3034 }
3035 
3036 /* iwlwifi: mvm/rx.c */
3037 /*
3038  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3039  * values are reported by the fw as positive values - need to negate
3040  * to obtain their dBM.  Account for missing antennas by replacing 0
3041  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3042  */
3043 static int
3044 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3045 {
3046 	int energy_a, energy_b, energy_c, max_energy;
3047 	uint32_t val;
3048 
3049 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3050 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3051 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3052 	energy_a = energy_a ? -energy_a : -256;
3053 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3054 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3055 	energy_b = energy_b ? -energy_b : -256;
3056 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3057 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3058 	energy_c = energy_c ? -energy_c : -256;
3059 	max_energy = MAX(energy_a, energy_b);
3060 	max_energy = MAX(max_energy, energy_c);
3061 
3062 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3063 	    "energy In A %d B %d C %d , and max %d\n",
3064 	    energy_a, energy_b, energy_c, max_energy);
3065 
3066 	return max_energy;
3067 }
3068 
3069 static void
3070 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3071 {
3072 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3073 
3074 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3075 
3076 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3077 }
3078 
3079 /*
3080  * Retrieve the average noise (in dBm) among receivers.
3081  */
3082 static int
3083 iwm_get_noise(struct iwm_softc *sc,
3084     const struct iwm_mvm_statistics_rx_non_phy *stats)
3085 {
3086 	int i, total, nbant, noise;
3087 
3088 	total = nbant = noise = 0;
3089 	for (i = 0; i < 3; i++) {
3090 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3091 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3092 		    __func__,
3093 		    i,
3094 		    noise);
3095 
3096 		if (noise) {
3097 			total += noise;
3098 			nbant++;
3099 		}
3100 	}
3101 
3102 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3103 	    __func__, nbant, total);
3104 #if 0
3105 	/* There should be at least one antenna but check anyway. */
3106 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3107 #else
3108 	/* For now, just hard-code it to -96 to be safe */
3109 	return (-96);
3110 #endif
3111 }
3112 
3113 static void
3114 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3115 {
3116 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3117 
3118 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3119 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3120 }
3121 
3122 /*
3123  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3124  *
3125  * Handles the actual data of the Rx packet from the fw
3126  */
3127 static boolean_t
3128 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3129 	boolean_t stolen)
3130 {
3131 	struct ieee80211com *ic = &sc->sc_ic;
3132 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3133 	struct ieee80211_frame *wh;
3134 	struct ieee80211_node *ni;
3135 	struct ieee80211_rx_stats rxs;
3136 	struct iwm_rx_phy_info *phy_info;
3137 	struct iwm_rx_mpdu_res_start *rx_res;
3138 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3139 	uint32_t len;
3140 	uint32_t rx_pkt_status;
3141 	int rssi;
3142 
3143 	phy_info = &sc->sc_last_phy_info;
3144 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3145 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3146 	len = le16toh(rx_res->byte_count);
3147 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3148 
3149 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3150 		device_printf(sc->sc_dev,
3151 		    "dsp size out of range [0,20]: %d\n",
3152 		    phy_info->cfg_phy_cnt);
3153 		goto fail;
3154 	}
3155 
3156 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3157 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3158 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3159 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3160 		goto fail;
3161 	}
3162 
3163 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3164 
3165 	/* Map it to relative value */
3166 	rssi = rssi - sc->sc_noise;
3167 
3168 	/* replenish ring for the buffer we're going to feed to the sharks */
3169 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3170 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3171 		    __func__);
3172 		goto fail;
3173 	}
3174 
3175 	m->m_data = pkt->data + sizeof(*rx_res);
3176 	m->m_pkthdr.len = m->m_len = len;
3177 
3178 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3179 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3180 
3181 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3182 
3183 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3184 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3185 	    __func__,
3186 	    le16toh(phy_info->channel),
3187 	    le16toh(phy_info->phy_flags));
3188 
3189 	/*
3190 	 * Populate an RX state struct with the provided information.
3191 	 */
3192 	bzero(&rxs, sizeof(rxs));
3193 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3194 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3195 	rxs.c_ieee = le16toh(phy_info->channel);
3196 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3197 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3198 	} else {
3199 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3200 	}
3201 
3202 	/* rssi is in 1/2db units */
3203 	rxs.c_rssi = rssi * 2;
3204 	rxs.c_nf = sc->sc_noise;
3205 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3206 		if (ni)
3207 			ieee80211_free_node(ni);
3208 		goto fail;
3209 	}
3210 
3211 	if (ieee80211_radiotap_active_vap(vap)) {
3212 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3213 
3214 		tap->wr_flags = 0;
3215 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3216 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3217 		tap->wr_chan_freq = htole16(rxs.c_freq);
3218 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3219 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3220 		tap->wr_dbm_antsignal = (int8_t)rssi;
3221 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3222 		tap->wr_tsft = phy_info->system_timestamp;
3223 		switch (phy_info->rate) {
3224 		/* CCK rates. */
3225 		case  10: tap->wr_rate =   2; break;
3226 		case  20: tap->wr_rate =   4; break;
3227 		case  55: tap->wr_rate =  11; break;
3228 		case 110: tap->wr_rate =  22; break;
3229 		/* OFDM rates. */
3230 		case 0xd: tap->wr_rate =  12; break;
3231 		case 0xf: tap->wr_rate =  18; break;
3232 		case 0x5: tap->wr_rate =  24; break;
3233 		case 0x7: tap->wr_rate =  36; break;
3234 		case 0x9: tap->wr_rate =  48; break;
3235 		case 0xb: tap->wr_rate =  72; break;
3236 		case 0x1: tap->wr_rate =  96; break;
3237 		case 0x3: tap->wr_rate = 108; break;
3238 		/* Unknown rate: should not happen. */
3239 		default:  tap->wr_rate =   0;
3240 		}
3241 	}
3242 
3243 	IWM_UNLOCK(sc);
3244 	if (ni != NULL) {
3245 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3246 		ieee80211_input_mimo(ni, m);
3247 		ieee80211_free_node(ni);
3248 	} else {
3249 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3250 		ieee80211_input_mimo_all(ic, m);
3251 	}
3252 	IWM_LOCK(sc);
3253 
3254 	return TRUE;
3255 
3256 fail:
3257 	counter_u64_add(ic->ic_ierrors, 1);
3258 	return FALSE;
3259 }
3260 
3261 static int
3262 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3263 	struct iwm_node *in)
3264 {
3265 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3266 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3267 	struct ieee80211_node *ni = &in->in_ni;
3268 	struct ieee80211vap *vap = ni->ni_vap;
3269 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3270 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3271 	boolean_t rate_matched;
3272 	uint8_t tx_resp_rate;
3273 
3274 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3275 
3276 	/* Update rate control statistics. */
3277 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3278 	    __func__,
3279 	    (int) le16toh(tx_resp->status.status),
3280 	    (int) le16toh(tx_resp->status.sequence),
3281 	    tx_resp->frame_count,
3282 	    tx_resp->bt_kill_count,
3283 	    tx_resp->failure_rts,
3284 	    tx_resp->failure_frame,
3285 	    le32toh(tx_resp->initial_rate),
3286 	    (int) le16toh(tx_resp->wireless_media_time));
3287 
3288 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3289 
3290 	/* For rate control, ignore frames sent at different initial rate */
3291 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3292 
3293 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3294 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3295 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3296 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3297 	}
3298 
3299 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3300 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3301 	txs->short_retries = tx_resp->failure_rts;
3302 	txs->long_retries = tx_resp->failure_frame;
3303 	if (status != IWM_TX_STATUS_SUCCESS &&
3304 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3305 		switch (status) {
3306 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3307 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3308 			break;
3309 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3310 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3311 			break;
3312 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3313 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3314 			break;
3315 		default:
3316 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3317 			break;
3318 		}
3319 	} else {
3320 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3321 	}
3322 
3323 	if (rate_matched) {
3324 		ieee80211_ratectl_tx_complete(ni, txs);
3325 
3326 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3327 		new_rate = vap->iv_bss->ni_txrate;
3328 		if (new_rate != 0 && new_rate != cur_rate) {
3329 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3330 			iwm_setrates(sc, in, rix);
3331 			iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3332 		}
3333  	}
3334 
3335 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3336 }
3337 
3338 static void
3339 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3340 {
3341 	struct iwm_cmd_header *cmd_hdr;
3342 	struct iwm_tx_ring *ring;
3343 	struct iwm_tx_data *txd;
3344 	struct iwm_node *in;
3345 	struct mbuf *m;
3346 	int idx, qid, qmsk, status;
3347 
3348 	cmd_hdr = &pkt->hdr;
3349 	idx = cmd_hdr->idx;
3350 	qid = cmd_hdr->qid;
3351 
3352 	ring = &sc->txq[qid];
3353 	txd = &ring->data[idx];
3354 	in = txd->in;
3355 	m = txd->m;
3356 
3357 	KASSERT(txd->done == 0, ("txd not done"));
3358 	KASSERT(txd->in != NULL, ("txd without node"));
3359 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3360 
3361 	sc->sc_tx_timer = 0;
3362 
3363 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3364 
3365 	/* Unmap and free mbuf. */
3366 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3367 	bus_dmamap_unload(ring->data_dmat, txd->map);
3368 
3369 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3370 	    "free txd %p, in %p\n", txd, txd->in);
3371 	txd->done = 1;
3372 	txd->m = NULL;
3373 	txd->in = NULL;
3374 
3375 	ieee80211_tx_complete(&in->in_ni, m, status);
3376 
3377 	qmsk = 1 << qid;
3378 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3379 		sc->qfullmsk &= ~qmsk;
3380 		if (sc->qfullmsk == 0)
3381 			iwm_start(sc);
3382 	}
3383 }
3384 
3385 /*
3386  * transmit side
3387  */
3388 
3389 /*
3390  * Process a "command done" firmware notification.  This is where we wakeup
3391  * processes waiting for a synchronous command completion.
3392  * from if_iwn
3393  */
3394 static void
3395 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3396 {
3397 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3398 	struct iwm_tx_data *data;
3399 
3400 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3401 		return;	/* Not a command ack. */
3402 	}
3403 
3404 	/* XXX wide commands? */
3405 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3406 	    "cmd notification type 0x%x qid %d idx %d\n",
3407 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3408 
3409 	data = &ring->data[pkt->hdr.idx];
3410 
3411 	/* If the command was mapped in an mbuf, free it. */
3412 	if (data->m != NULL) {
3413 		bus_dmamap_sync(ring->data_dmat, data->map,
3414 		    BUS_DMASYNC_POSTWRITE);
3415 		bus_dmamap_unload(ring->data_dmat, data->map);
3416 		m_freem(data->m);
3417 		data->m = NULL;
3418 	}
3419 	wakeup(&ring->desc[pkt->hdr.idx]);
3420 
3421 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3422 		device_printf(sc->sc_dev,
3423 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3424 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3425 		/* XXX call iwm_force_nmi() */
3426 	}
3427 
3428 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3429 	ring->queued--;
3430 	if (ring->queued == 0)
3431 		iwm_pcie_clear_cmd_in_flight(sc);
3432 }
3433 
3434 #if 0
3435 /*
3436  * necessary only for block ack mode
3437  */
3438 void
3439 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3440 	uint16_t len)
3441 {
3442 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3443 	uint16_t w_val;
3444 
3445 	scd_bc_tbl = sc->sched_dma.vaddr;
3446 
3447 	len += 8; /* magic numbers came naturally from paris */
3448 	len = roundup(len, 4) / 4;
3449 
3450 	w_val = htole16(sta_id << 12 | len);
3451 
3452 	/* Update TX scheduler. */
3453 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3454 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3455 	    BUS_DMASYNC_PREWRITE);
3456 
3457 	/* I really wonder what this is ?!? */
3458 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3459 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3460 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3461 		    BUS_DMASYNC_PREWRITE);
3462 	}
3463 }
3464 #endif
3465 
3466 static int
3467 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3468 {
3469 	int i;
3470 
3471 	for (i = 0; i < nitems(iwm_rates); i++) {
3472 		if (iwm_rates[i].rate == rate)
3473 			return (i);
3474 	}
3475 	/* XXX error? */
3476 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3477 	    "%s: couldn't find an entry for rate=%d\n",
3478 	    __func__,
3479 	    rate);
3480 	return (0);
3481 }
3482 
3483 /*
3484  * Fill in the rate related information for a transmit command.
3485  */
3486 static const struct iwm_rate *
3487 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3488 	struct mbuf *m, struct iwm_tx_cmd *tx)
3489 {
3490 	struct ieee80211_node *ni = &in->in_ni;
3491 	struct ieee80211_frame *wh;
3492 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3493 	const struct iwm_rate *rinfo;
3494 	int type;
3495 	int ridx, rate_flags;
3496 
3497 	wh = mtod(m, struct ieee80211_frame *);
3498 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3499 
3500 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3501 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3502 
3503 	if (type == IEEE80211_FC0_TYPE_MGT ||
3504 	    type == IEEE80211_FC0_TYPE_CTL ||
3505 	    (m->m_flags & M_EAPOL) != 0) {
3506 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3507 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3508 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3509 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3510 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3511 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3512 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3513 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3514 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3515 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3516 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3517 	} else {
3518 		/* for data frames, use RS table */
3519 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3520 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3521 		if (ridx == -1)
3522 			ridx = 0;
3523 
3524 		/* This is the index into the programmed table */
3525 		tx->initial_rate_index = 0;
3526 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3527 	}
3528 
3529 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3530 	    "%s: frame type=%d txrate %d\n",
3531 	        __func__, type, iwm_rates[ridx].rate);
3532 
3533 	rinfo = &iwm_rates[ridx];
3534 
3535 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3536 	    __func__, ridx,
3537 	    rinfo->rate,
3538 	    !! (IWM_RIDX_IS_CCK(ridx))
3539 	    );
3540 
3541 	/* XXX TODO: hard-coded TX antenna? */
3542 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3543 	if (IWM_RIDX_IS_CCK(ridx))
3544 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3545 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3546 
3547 	return rinfo;
3548 }
3549 
3550 #define TB0_SIZE 16
3551 static int
3552 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3553 {
3554 	struct ieee80211com *ic = &sc->sc_ic;
3555 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3556 	struct iwm_node *in = IWM_NODE(ni);
3557 	struct iwm_tx_ring *ring;
3558 	struct iwm_tx_data *data;
3559 	struct iwm_tfd *desc;
3560 	struct iwm_device_cmd *cmd;
3561 	struct iwm_tx_cmd *tx;
3562 	struct ieee80211_frame *wh;
3563 	struct ieee80211_key *k = NULL;
3564 	struct mbuf *m1;
3565 	const struct iwm_rate *rinfo;
3566 	uint32_t flags;
3567 	u_int hdrlen;
3568 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3569 	int nsegs;
3570 	uint8_t tid, type;
3571 	int i, totlen, error, pad;
3572 
3573 	wh = mtod(m, struct ieee80211_frame *);
3574 	hdrlen = ieee80211_anyhdrsize(wh);
3575 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3576 	tid = 0;
3577 	ring = &sc->txq[ac];
3578 	desc = &ring->desc[ring->cur];
3579 	data = &ring->data[ring->cur];
3580 
3581 	/* Fill out iwm_tx_cmd to send to the firmware */
3582 	cmd = &ring->cmd[ring->cur];
3583 	cmd->hdr.code = IWM_TX_CMD;
3584 	cmd->hdr.flags = 0;
3585 	cmd->hdr.qid = ring->qid;
3586 	cmd->hdr.idx = ring->cur;
3587 
3588 	tx = (void *)cmd->data;
3589 	memset(tx, 0, sizeof(*tx));
3590 
3591 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3592 
3593 	/* Encrypt the frame if need be. */
3594 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3595 		/* Retrieve key for TX && do software encryption. */
3596 		k = ieee80211_crypto_encap(ni, m);
3597 		if (k == NULL) {
3598 			m_freem(m);
3599 			return (ENOBUFS);
3600 		}
3601 		/* 802.11 header may have moved. */
3602 		wh = mtod(m, struct ieee80211_frame *);
3603 	}
3604 
3605 	if (ieee80211_radiotap_active_vap(vap)) {
3606 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3607 
3608 		tap->wt_flags = 0;
3609 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3610 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3611 		tap->wt_rate = rinfo->rate;
3612 		if (k != NULL)
3613 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3614 		ieee80211_radiotap_tx(vap, m);
3615 	}
3616 
3617 	flags = 0;
3618 	totlen = m->m_pkthdr.len;
3619 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3620 		flags |= IWM_TX_CMD_FLG_ACK;
3621 	}
3622 
3623 	if (type == IEEE80211_FC0_TYPE_DATA &&
3624 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3625 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3626 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3627 	}
3628 
3629 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3630 	    type != IEEE80211_FC0_TYPE_DATA)
3631 		tx->sta_id = sc->sc_aux_sta.sta_id;
3632 	else
3633 		tx->sta_id = IWM_STATION_ID;
3634 
3635 	if (type == IEEE80211_FC0_TYPE_MGT) {
3636 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3637 
3638 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3639 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3640 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3641 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3642 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3643 		} else {
3644 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3645 		}
3646 	} else {
3647 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3648 	}
3649 
3650 	if (hdrlen & 3) {
3651 		/* First segment length must be a multiple of 4. */
3652 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3653 		pad = 4 - (hdrlen & 3);
3654 	} else
3655 		pad = 0;
3656 
3657 	tx->driver_txop = 0;
3658 	tx->next_frame_len = 0;
3659 
3660 	tx->len = htole16(totlen);
3661 	tx->tid_tspec = tid;
3662 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3663 
3664 	/* Set physical address of "scratch area". */
3665 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3666 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3667 
3668 	/* Copy 802.11 header in TX command. */
3669 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3670 
3671 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3672 
3673 	tx->sec_ctl = 0;
3674 	tx->tx_flags |= htole32(flags);
3675 
3676 	/* Trim 802.11 header. */
3677 	m_adj(m, hdrlen);
3678 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3679 	    segs, &nsegs, BUS_DMA_NOWAIT);
3680 	if (error != 0) {
3681 		if (error != EFBIG) {
3682 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3683 			    error);
3684 			m_freem(m);
3685 			return error;
3686 		}
3687 		/* Too many DMA segments, linearize mbuf. */
3688 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3689 		if (m1 == NULL) {
3690 			device_printf(sc->sc_dev,
3691 			    "%s: could not defrag mbuf\n", __func__);
3692 			m_freem(m);
3693 			return (ENOBUFS);
3694 		}
3695 		m = m1;
3696 
3697 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3698 		    segs, &nsegs, BUS_DMA_NOWAIT);
3699 		if (error != 0) {
3700 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3701 			    error);
3702 			m_freem(m);
3703 			return error;
3704 		}
3705 	}
3706 	data->m = m;
3707 	data->in = in;
3708 	data->done = 0;
3709 
3710 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3711 	    "sending txd %p, in %p\n", data, data->in);
3712 	KASSERT(data->in != NULL, ("node is NULL"));
3713 
3714 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3715 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3716 	    ring->qid, ring->cur, totlen, nsegs,
3717 	    le32toh(tx->tx_flags),
3718 	    le32toh(tx->rate_n_flags),
3719 	    tx->initial_rate_index
3720 	    );
3721 
3722 	/* Fill TX descriptor. */
3723 	memset(desc, 0, sizeof(*desc));
3724 	desc->num_tbs = 2 + nsegs;
3725 
3726 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3727 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3728 	    (TB0_SIZE << 4));
3729 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3730 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3731 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3732 	    hdrlen + pad - TB0_SIZE) << 4));
3733 
3734 	/* Other DMA segments are for data payload. */
3735 	for (i = 0; i < nsegs; i++) {
3736 		seg = &segs[i];
3737 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3738 		desc->tbs[i + 2].hi_n_len =
3739 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3740 		    (seg->ds_len << 4);
3741 	}
3742 
3743 	bus_dmamap_sync(ring->data_dmat, data->map,
3744 	    BUS_DMASYNC_PREWRITE);
3745 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3746 	    BUS_DMASYNC_PREWRITE);
3747 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3748 	    BUS_DMASYNC_PREWRITE);
3749 
3750 #if 0
3751 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3752 #endif
3753 
3754 	/* Kick TX ring. */
3755 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3756 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3757 
3758 	/* Mark TX ring as full if we reach a certain threshold. */
3759 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3760 		sc->qfullmsk |= 1 << ring->qid;
3761 	}
3762 
3763 	return 0;
3764 }
3765 
3766 static int
3767 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3768     const struct ieee80211_bpf_params *params)
3769 {
3770 	struct ieee80211com *ic = ni->ni_ic;
3771 	struct iwm_softc *sc = ic->ic_softc;
3772 	int error = 0;
3773 
3774 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3775 	    "->%s begin\n", __func__);
3776 
3777 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3778 		m_freem(m);
3779 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3780 		    "<-%s not RUNNING\n", __func__);
3781 		return (ENETDOWN);
3782         }
3783 
3784 	IWM_LOCK(sc);
3785 	/* XXX fix this */
3786         if (params == NULL) {
3787 		error = iwm_tx(sc, m, ni, 0);
3788 	} else {
3789 		error = iwm_tx(sc, m, ni, 0);
3790 	}
3791 	if (sc->sc_tx_timer == 0)
3792 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3793 	sc->sc_tx_timer = 5;
3794 	IWM_UNLOCK(sc);
3795 
3796         return (error);
3797 }
3798 
3799 /*
3800  * mvm/tx.c
3801  */
3802 
3803 /*
3804  * Note that there are transports that buffer frames before they reach
3805  * the firmware. This means that after flush_tx_path is called, the
3806  * queue might not be empty. The race-free way to handle this is to:
3807  * 1) set the station as draining
3808  * 2) flush the Tx path
3809  * 3) wait for the transport queues to be empty
3810  */
3811 int
3812 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3813 {
3814 	int ret;
3815 	struct iwm_tx_path_flush_cmd flush_cmd = {
3816 		.queues_ctl = htole32(tfd_msk),
3817 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3818 	};
3819 
3820 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3821 	    sizeof(flush_cmd), &flush_cmd);
3822 	if (ret)
3823                 device_printf(sc->sc_dev,
3824 		    "Flushing tx queue failed: %d\n", ret);
3825 	return ret;
3826 }
3827 
3828 /*
3829  * BEGIN mvm/quota.c
3830  */
3831 
3832 static int
3833 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3834 {
3835 	struct iwm_time_quota_cmd cmd;
3836 	int i, idx, ret, num_active_macs, quota, quota_rem;
3837 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3838 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3839 	uint16_t id;
3840 
3841 	memset(&cmd, 0, sizeof(cmd));
3842 
3843 	/* currently, PHY ID == binding ID */
3844 	if (ivp) {
3845 		id = ivp->phy_ctxt->id;
3846 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3847 		colors[id] = ivp->phy_ctxt->color;
3848 
3849 		if (1)
3850 			n_ifs[id] = 1;
3851 	}
3852 
3853 	/*
3854 	 * The FW's scheduling session consists of
3855 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3856 	 * equally between all the bindings that require quota
3857 	 */
3858 	num_active_macs = 0;
3859 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3860 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3861 		num_active_macs += n_ifs[i];
3862 	}
3863 
3864 	quota = 0;
3865 	quota_rem = 0;
3866 	if (num_active_macs) {
3867 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3868 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3869 	}
3870 
3871 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3872 		if (colors[i] < 0)
3873 			continue;
3874 
3875 		cmd.quotas[idx].id_and_color =
3876 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3877 
3878 		if (n_ifs[i] <= 0) {
3879 			cmd.quotas[idx].quota = htole32(0);
3880 			cmd.quotas[idx].max_duration = htole32(0);
3881 		} else {
3882 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3883 			cmd.quotas[idx].max_duration = htole32(0);
3884 		}
3885 		idx++;
3886 	}
3887 
3888 	/* Give the remainder of the session to the first binding */
3889 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3890 
3891 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3892 	    sizeof(cmd), &cmd);
3893 	if (ret)
3894 		device_printf(sc->sc_dev,
3895 		    "%s: Failed to send quota: %d\n", __func__, ret);
3896 	return ret;
3897 }
3898 
3899 /*
3900  * END mvm/quota.c
3901  */
3902 
3903 /*
3904  * ieee80211 routines
3905  */
3906 
3907 /*
3908  * Change to AUTH state in 80211 state machine.  Roughly matches what
3909  * Linux does in bss_info_changed().
3910  */
3911 static int
3912 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3913 {
3914 	struct ieee80211_node *ni;
3915 	struct iwm_node *in;
3916 	struct iwm_vap *iv = IWM_VAP(vap);
3917 	uint32_t duration;
3918 	int error;
3919 
3920 	/*
3921 	 * XXX i have a feeling that the vap node is being
3922 	 * freed from underneath us. Grr.
3923 	 */
3924 	ni = ieee80211_ref_node(vap->iv_bss);
3925 	in = IWM_NODE(ni);
3926 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3927 	    "%s: called; vap=%p, bss ni=%p\n",
3928 	    __func__,
3929 	    vap,
3930 	    ni);
3931 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
3932 	    __func__, ether_sprintf(ni->ni_bssid));
3933 
3934 	in->in_assoc = 0;
3935 	iv->iv_auth = 1;
3936 
3937 	/*
3938 	 * Firmware bug - it'll crash if the beacon interval is less
3939 	 * than 16. We can't avoid connecting at all, so refuse the
3940 	 * station state change, this will cause net80211 to abandon
3941 	 * attempts to connect to this AP, and eventually wpa_s will
3942 	 * blacklist the AP...
3943 	 */
3944 	if (ni->ni_intval < 16) {
3945 		device_printf(sc->sc_dev,
3946 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3947 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3948 		error = EINVAL;
3949 		goto out;
3950 	}
3951 
3952 	error = iwm_allow_mcast(vap, sc);
3953 	if (error) {
3954 		device_printf(sc->sc_dev,
3955 		    "%s: failed to set multicast\n", __func__);
3956 		goto out;
3957 	}
3958 
3959 	/*
3960 	 * This is where it deviates from what Linux does.
3961 	 *
3962 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3963 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3964 	 * and always does a mac_ctx_changed().
3965 	 *
3966 	 * The openbsd port doesn't attempt to do that - it reset things
3967 	 * at odd states and does the add here.
3968 	 *
3969 	 * So, until the state handling is fixed (ie, we never reset
3970 	 * the NIC except for a firmware failure, which should drag
3971 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3972 	 * contexts that are required), let's do a dirty hack here.
3973 	 */
3974 	if (iv->is_uploaded) {
3975 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3976 			device_printf(sc->sc_dev,
3977 			    "%s: failed to update MAC\n", __func__);
3978 			goto out;
3979 		}
3980 	} else {
3981 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3982 			device_printf(sc->sc_dev,
3983 			    "%s: failed to add MAC\n", __func__);
3984 			goto out;
3985 		}
3986 	}
3987 	sc->sc_firmware_state = 1;
3988 
3989 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3990 	    in->in_ni.ni_chan, 1, 1)) != 0) {
3991 		device_printf(sc->sc_dev,
3992 		    "%s: failed update phy ctxt\n", __func__);
3993 		goto out;
3994 	}
3995 	iv->phy_ctxt = &sc->sc_phyctxt[0];
3996 
3997 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
3998 		device_printf(sc->sc_dev,
3999 		    "%s: binding update cmd\n", __func__);
4000 		goto out;
4001 	}
4002 	sc->sc_firmware_state = 2;
4003 	/*
4004 	 * Authentication becomes unreliable when powersaving is left enabled
4005 	 * here. Powersaving will be activated again when association has
4006 	 * finished or is aborted.
4007 	 */
4008 	iv->ps_disabled = TRUE;
4009 	error = iwm_mvm_power_update_mac(sc);
4010 	iv->ps_disabled = FALSE;
4011 	if (error != 0) {
4012 		device_printf(sc->sc_dev,
4013 		    "%s: failed to update power management\n",
4014 		    __func__);
4015 		goto out;
4016 	}
4017 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4018 		device_printf(sc->sc_dev,
4019 		    "%s: failed to add sta\n", __func__);
4020 		goto out;
4021 	}
4022 	sc->sc_firmware_state = 3;
4023 
4024 	/*
4025 	 * Prevent the FW from wandering off channel during association
4026 	 * by "protecting" the session with a time event.
4027 	 */
4028 	/* XXX duration is in units of TU, not MS */
4029 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4030 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4031 
4032 	error = 0;
4033 out:
4034 	if (error != 0)
4035 		iv->iv_auth = 0;
4036 	ieee80211_free_node(ni);
4037 	return (error);
4038 }
4039 
4040 static struct ieee80211_node *
4041 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4042 {
4043 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4044 	    M_NOWAIT | M_ZERO);
4045 }
4046 
4047 static uint8_t
4048 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4049 {
4050 	uint8_t plcp = rate_n_flags & 0xff;
4051 	int i;
4052 
4053 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4054 		if (iwm_rates[i].plcp == plcp)
4055 			return iwm_rates[i].rate;
4056 	}
4057 	return 0;
4058 }
4059 
4060 uint8_t
4061 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4062 {
4063 	int i;
4064 	uint8_t rval;
4065 
4066 	for (i = 0; i < rs->rs_nrates; i++) {
4067 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4068 		if (rval == iwm_rates[ridx].rate)
4069 			return rs->rs_rates[i];
4070 	}
4071 
4072 	return 0;
4073 }
4074 
4075 static int
4076 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4077 {
4078 	int i;
4079 
4080 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4081 		if (iwm_rates[i].rate == rate)
4082 			return i;
4083 	}
4084 
4085 	device_printf(sc->sc_dev,
4086 	    "%s: WARNING: device rate for %u not found!\n",
4087 	    __func__, rate);
4088 
4089 	return -1;
4090 }
4091 
4092 
4093 static void
4094 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4095 {
4096 	struct ieee80211_node *ni = &in->in_ni;
4097 	struct iwm_lq_cmd *lq = &in->in_lq;
4098 	struct ieee80211_rateset *rs = &ni->ni_rates;
4099 	int nrates = rs->rs_nrates;
4100 	int i, ridx, tab = 0;
4101 //	int txant = 0;
4102 
4103 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4104 
4105 	if (nrates > nitems(lq->rs_table)) {
4106 		device_printf(sc->sc_dev,
4107 		    "%s: node supports %d rates, driver handles "
4108 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4109 		return;
4110 	}
4111 	if (nrates == 0) {
4112 		device_printf(sc->sc_dev,
4113 		    "%s: node supports 0 rates, odd!\n", __func__);
4114 		return;
4115 	}
4116 	nrates = imin(rix + 1, nrates);
4117 
4118 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4119 	    "%s: nrates=%d\n", __func__, nrates);
4120 
4121 	/* then construct a lq_cmd based on those */
4122 	memset(lq, 0, sizeof(*lq));
4123 	lq->sta_id = IWM_STATION_ID;
4124 
4125 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4126 	if (ni->ni_flags & IEEE80211_NODE_HT)
4127 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4128 
4129 	/*
4130 	 * are these used? (we don't do SISO or MIMO)
4131 	 * need to set them to non-zero, though, or we get an error.
4132 	 */
4133 	lq->single_stream_ant_msk = 1;
4134 	lq->dual_stream_ant_msk = 1;
4135 
4136 	/*
4137 	 * Build the actual rate selection table.
4138 	 * The lowest bits are the rates.  Additionally,
4139 	 * CCK needs bit 9 to be set.  The rest of the bits
4140 	 * we add to the table select the tx antenna
4141 	 * Note that we add the rates in the highest rate first
4142 	 * (opposite of ni_rates).
4143 	 */
4144 	for (i = 0; i < nrates; i++) {
4145 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4146 		int nextant;
4147 
4148 		/* Map 802.11 rate to HW rate index. */
4149 		ridx = iwm_rate2ridx(sc, rate);
4150 		if (ridx == -1)
4151 			continue;
4152 
4153 #if 0
4154 		if (txant == 0)
4155 			txant = iwm_mvm_get_valid_tx_ant(sc);
4156 		nextant = 1<<(ffs(txant)-1);
4157 		txant &= ~nextant;
4158 #else
4159 		nextant = iwm_mvm_get_valid_tx_ant(sc);
4160 #endif
4161 		tab = iwm_rates[ridx].plcp;
4162 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4163 		if (IWM_RIDX_IS_CCK(ridx))
4164 			tab |= IWM_RATE_MCS_CCK_MSK;
4165 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4166 		    "station rate i=%d, rate=%d, hw=%x\n",
4167 		    i, iwm_rates[ridx].rate, tab);
4168 		lq->rs_table[i] = htole32(tab);
4169 	}
4170 	/* then fill the rest with the lowest possible rate */
4171 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4172 		KASSERT(tab != 0, ("invalid tab"));
4173 		lq->rs_table[i] = htole32(tab);
4174 	}
4175 }
4176 
4177 static int
4178 iwm_media_change(struct ifnet *ifp)
4179 {
4180 	struct ieee80211vap *vap = ifp->if_softc;
4181 	struct ieee80211com *ic = vap->iv_ic;
4182 	struct iwm_softc *sc = ic->ic_softc;
4183 	int error;
4184 
4185 	error = ieee80211_media_change(ifp);
4186 	if (error != ENETRESET)
4187 		return error;
4188 
4189 	IWM_LOCK(sc);
4190 	if (ic->ic_nrunning > 0) {
4191 		iwm_stop(sc);
4192 		iwm_init(sc);
4193 	}
4194 	IWM_UNLOCK(sc);
4195 	return error;
4196 }
4197 
4198 static void
4199 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4200 {
4201 	struct iwm_vap *ivp = IWM_VAP(vap);
4202 	int error;
4203 
4204 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4205 	sc->sc_tx_timer = 0;
4206 
4207 	ivp->iv_auth = 0;
4208 	if (sc->sc_firmware_state == 3) {
4209 		iwm_xmit_queue_drain(sc);
4210 //		iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4211 		error = iwm_mvm_rm_sta(sc, vap, TRUE);
4212 		if (error) {
4213 			device_printf(sc->sc_dev,
4214 			    "%s: Failed to remove station: %d\n",
4215 			    __func__, error);
4216 		}
4217 	}
4218 	if (sc->sc_firmware_state == 3) {
4219 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4220 		if (error) {
4221 			device_printf(sc->sc_dev,
4222 			    "%s: Failed to change mac context: %d\n",
4223 			    __func__, error);
4224 		}
4225 	}
4226 	if (sc->sc_firmware_state == 3) {
4227 		error = iwm_mvm_sf_update(sc, vap, FALSE);
4228 		if (error) {
4229 			device_printf(sc->sc_dev,
4230 			    "%s: Failed to update smart FIFO: %d\n",
4231 			    __func__, error);
4232 		}
4233 	}
4234 	if (sc->sc_firmware_state == 3) {
4235 		error = iwm_mvm_rm_sta_id(sc, vap);
4236 		if (error) {
4237 			device_printf(sc->sc_dev,
4238 			    "%s: Failed to remove station id: %d\n",
4239 			    __func__, error);
4240 		}
4241 	}
4242 	if (sc->sc_firmware_state == 3) {
4243 		error = iwm_mvm_update_quotas(sc, NULL);
4244 		if (error) {
4245 			device_printf(sc->sc_dev,
4246 			    "%s: Failed to update PHY quota: %d\n",
4247 			    __func__, error);
4248 		}
4249 	}
4250 	if (sc->sc_firmware_state == 3) {
4251 		/* XXX Might need to specify bssid correctly. */
4252 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4253 		if (error) {
4254 			device_printf(sc->sc_dev,
4255 			    "%s: Failed to change mac context: %d\n",
4256 			    __func__, error);
4257 		}
4258 	}
4259 	if (sc->sc_firmware_state == 3) {
4260 		sc->sc_firmware_state = 2;
4261 	}
4262 	if (sc->sc_firmware_state > 1) {
4263 		error = iwm_mvm_binding_remove_vif(sc, ivp);
4264 		if (error) {
4265 			device_printf(sc->sc_dev,
4266 			    "%s: Failed to remove channel ctx: %d\n",
4267 			    __func__, error);
4268 		}
4269 	}
4270 	if (sc->sc_firmware_state > 1) {
4271 		sc->sc_firmware_state = 1;
4272 	}
4273 	ivp->phy_ctxt = NULL;
4274 	if (sc->sc_firmware_state > 0) {
4275 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4276 		if (error) {
4277 			device_printf(sc->sc_dev,
4278 			    "%s: Failed to change mac context: %d\n",
4279 			    __func__, error);
4280 		}
4281 	}
4282 	if (sc->sc_firmware_state > 0) {
4283 		error = iwm_mvm_power_update_mac(sc);
4284 		if (error != 0) {
4285 			device_printf(sc->sc_dev,
4286 			    "%s: failed to update power management\n",
4287 			    __func__);
4288 		}
4289 	}
4290 	sc->sc_firmware_state = 0;
4291 }
4292 
4293 static int
4294 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4295 {
4296 	struct iwm_vap *ivp = IWM_VAP(vap);
4297 	struct ieee80211com *ic = vap->iv_ic;
4298 	struct iwm_softc *sc = ic->ic_softc;
4299 	struct iwm_node *in;
4300 	int error;
4301 
4302 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4303 	    "switching state %s -> %s arg=0x%x\n",
4304 	    ieee80211_state_name[vap->iv_state],
4305 	    ieee80211_state_name[nstate],
4306 	    arg);
4307 
4308 	IEEE80211_UNLOCK(ic);
4309 	IWM_LOCK(sc);
4310 
4311 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4312 	    (nstate == IEEE80211_S_AUTH ||
4313 	     nstate == IEEE80211_S_ASSOC ||
4314 	     nstate == IEEE80211_S_RUN)) {
4315 		/* Stop blinking for a scan, when authenticating. */
4316 		iwm_led_blink_stop(sc);
4317 	}
4318 
4319 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4320 		iwm_mvm_led_disable(sc);
4321 		/* disable beacon filtering if we're hopping out of RUN */
4322 		iwm_mvm_disable_beacon_filter(sc);
4323 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4324 			in->in_assoc = 0;
4325 	}
4326 
4327 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4328 	     vap->iv_state == IEEE80211_S_ASSOC ||
4329 	     vap->iv_state == IEEE80211_S_RUN) &&
4330 	    (nstate == IEEE80211_S_INIT ||
4331 	     nstate == IEEE80211_S_SCAN ||
4332 	     nstate == IEEE80211_S_AUTH)) {
4333 		iwm_mvm_stop_session_protection(sc, ivp);
4334 	}
4335 
4336 	if ((vap->iv_state == IEEE80211_S_RUN ||
4337 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4338 	    nstate == IEEE80211_S_INIT) {
4339 		/*
4340 		 * In this case, iv_newstate() wants to send an 80211 frame on
4341 		 * the network that we are leaving. So we need to call it,
4342 		 * before tearing down all the firmware state.
4343 		 */
4344 		IWM_UNLOCK(sc);
4345 		IEEE80211_LOCK(ic);
4346 		ivp->iv_newstate(vap, nstate, arg);
4347 		IEEE80211_UNLOCK(ic);
4348 		IWM_LOCK(sc);
4349 		iwm_bring_down_firmware(sc, vap);
4350 		IWM_UNLOCK(sc);
4351 		IEEE80211_LOCK(ic);
4352 		return 0;
4353 	}
4354 
4355 	switch (nstate) {
4356 	case IEEE80211_S_INIT:
4357 	case IEEE80211_S_SCAN:
4358 		break;
4359 
4360 	case IEEE80211_S_AUTH:
4361 		iwm_bring_down_firmware(sc, vap);
4362 		if ((error = iwm_auth(vap, sc)) != 0) {
4363 			device_printf(sc->sc_dev,
4364 			    "%s: could not move to auth state: %d\n",
4365 			    __func__, error);
4366 			iwm_bring_down_firmware(sc, vap);
4367 			IWM_UNLOCK(sc);
4368 			IEEE80211_LOCK(ic);
4369 			return 1;
4370 		}
4371 		break;
4372 
4373 	case IEEE80211_S_ASSOC:
4374 		/*
4375 		 * EBS may be disabled due to previous failures reported by FW.
4376 		 * Reset EBS status here assuming environment has been changed.
4377 		 */
4378 		sc->last_ebs_successful = TRUE;
4379 		break;
4380 
4381 	case IEEE80211_S_RUN:
4382 		in = IWM_NODE(vap->iv_bss);
4383 		/* Update the association state, now we have it all */
4384 		/* (eg associd comes in at this point */
4385 		error = iwm_mvm_update_sta(sc, in);
4386 		if (error != 0) {
4387 			device_printf(sc->sc_dev,
4388 			    "%s: failed to update STA\n", __func__);
4389 			IWM_UNLOCK(sc);
4390 			IEEE80211_LOCK(ic);
4391 			return error;
4392 		}
4393 		in->in_assoc = 1;
4394 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4395 		if (error != 0) {
4396 			device_printf(sc->sc_dev,
4397 			    "%s: failed to update MAC: %d\n", __func__, error);
4398 		}
4399 
4400 		iwm_mvm_sf_update(sc, vap, FALSE);
4401 		iwm_mvm_enable_beacon_filter(sc, ivp);
4402 		iwm_mvm_power_update_mac(sc);
4403 		iwm_mvm_update_quotas(sc, ivp);
4404 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4405 		iwm_setrates(sc, in, rix);
4406 
4407 		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4408 			device_printf(sc->sc_dev,
4409 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4410 		}
4411 
4412 		iwm_mvm_led_enable(sc);
4413 		break;
4414 
4415 	default:
4416 		break;
4417 	}
4418 	IWM_UNLOCK(sc);
4419 	IEEE80211_LOCK(ic);
4420 
4421 	return (ivp->iv_newstate(vap, nstate, arg));
4422 }
4423 
4424 void
4425 iwm_endscan_cb(void *arg, int pending)
4426 {
4427 	struct iwm_softc *sc = arg;
4428 	struct ieee80211com *ic = &sc->sc_ic;
4429 
4430 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4431 	    "%s: scan ended\n",
4432 	    __func__);
4433 
4434 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4435 }
4436 
4437 static int
4438 iwm_send_bt_init_conf(struct iwm_softc *sc)
4439 {
4440 	struct iwm_bt_coex_cmd bt_cmd;
4441 
4442 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4443 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4444 
4445 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4446 	    &bt_cmd);
4447 }
4448 
4449 static boolean_t
4450 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4451 {
4452 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4453 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4454 
4455 	if (iwm_lar_disable)
4456 		return FALSE;
4457 
4458 	/*
4459 	 * Enable LAR only if it is supported by the FW (TLV) &&
4460 	 * enabled in the NVM
4461 	 */
4462 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4463 		return nvm_lar && tlv_lar;
4464 	else
4465 		return tlv_lar;
4466 }
4467 
4468 static boolean_t
4469 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4470 {
4471 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4472 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4473 }
4474 
4475 static int
4476 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4477 {
4478 	struct iwm_mcc_update_cmd mcc_cmd;
4479 	struct iwm_host_cmd hcmd = {
4480 		.id = IWM_MCC_UPDATE_CMD,
4481 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4482 		.data = { &mcc_cmd },
4483 	};
4484 	int ret;
4485 #ifdef IWM_DEBUG
4486 	struct iwm_rx_packet *pkt;
4487 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4488 	struct iwm_mcc_update_resp *mcc_resp;
4489 	int n_channels;
4490 	uint16_t mcc;
4491 #endif
4492 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4493 
4494 	if (!iwm_mvm_is_lar_supported(sc)) {
4495 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4496 		    __func__);
4497 		return 0;
4498 	}
4499 
4500 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4501 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4502 	if (iwm_mvm_is_wifi_mcc_supported(sc))
4503 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4504 	else
4505 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4506 
4507 	if (resp_v2)
4508 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4509 	else
4510 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4511 
4512 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4513 	    "send MCC update to FW with '%c%c' src = %d\n",
4514 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4515 
4516 	ret = iwm_send_cmd(sc, &hcmd);
4517 	if (ret)
4518 		return ret;
4519 
4520 #ifdef IWM_DEBUG
4521 	pkt = hcmd.resp_pkt;
4522 
4523 	/* Extract MCC response */
4524 	if (resp_v2) {
4525 		mcc_resp = (void *)pkt->data;
4526 		mcc = mcc_resp->mcc;
4527 		n_channels =  le32toh(mcc_resp->n_channels);
4528 	} else {
4529 		mcc_resp_v1 = (void *)pkt->data;
4530 		mcc = mcc_resp_v1->mcc;
4531 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4532 	}
4533 
4534 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4535 	if (mcc == 0)
4536 		mcc = 0x3030;  /* "00" - world */
4537 
4538 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4539 	    "regulatory domain '%c%c' (%d channels available)\n",
4540 	    mcc >> 8, mcc & 0xff, n_channels);
4541 #endif
4542 	iwm_free_resp(sc, &hcmd);
4543 
4544 	return 0;
4545 }
4546 
4547 static void
4548 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4549 {
4550 	struct iwm_host_cmd cmd = {
4551 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4552 		.len = { sizeof(uint32_t), },
4553 		.data = { &backoff, },
4554 	};
4555 
4556 	if (iwm_send_cmd(sc, &cmd) != 0) {
4557 		device_printf(sc->sc_dev,
4558 		    "failed to change thermal tx backoff\n");
4559 	}
4560 }
4561 
4562 static int
4563 iwm_init_hw(struct iwm_softc *sc)
4564 {
4565 	struct ieee80211com *ic = &sc->sc_ic;
4566 	int error, i, ac;
4567 
4568 	sc->sf_state = IWM_SF_UNINIT;
4569 
4570 	if ((error = iwm_start_hw(sc)) != 0) {
4571 		printf("iwm_start_hw: failed %d\n", error);
4572 		return error;
4573 	}
4574 
4575 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4576 		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4577 		return error;
4578 	}
4579 
4580 	/*
4581 	 * should stop and start HW since that INIT
4582 	 * image just loaded
4583 	 */
4584 	iwm_stop_device(sc);
4585 	sc->sc_ps_disabled = FALSE;
4586 	if ((error = iwm_start_hw(sc)) != 0) {
4587 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4588 		return error;
4589 	}
4590 
4591 	/* omstart, this time with the regular firmware */
4592 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4593 	if (error) {
4594 		device_printf(sc->sc_dev, "could not load firmware\n");
4595 		goto error;
4596 	}
4597 
4598 	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4599 	if (error)
4600 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4601 
4602 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4603 		device_printf(sc->sc_dev, "bt init conf failed\n");
4604 		goto error;
4605 	}
4606 
4607 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4608 	if (error != 0) {
4609 		device_printf(sc->sc_dev, "antenna config failed\n");
4610 		goto error;
4611 	}
4612 
4613 	/* Send phy db control command and then phy db calibration */
4614 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4615 		goto error;
4616 
4617 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4618 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4619 		goto error;
4620 	}
4621 
4622 	/* Add auxiliary station for scanning */
4623 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4624 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4625 		goto error;
4626 	}
4627 
4628 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4629 		/*
4630 		 * The channel used here isn't relevant as it's
4631 		 * going to be overwritten in the other flows.
4632 		 * For now use the first channel we have.
4633 		 */
4634 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4635 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4636 			goto error;
4637 	}
4638 
4639 	/* Initialize tx backoffs to the minimum. */
4640 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4641 		iwm_mvm_tt_tx_backoff(sc, 0);
4642 
4643 	if (iwm_mvm_config_ltr(sc) != 0)
4644 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4645 
4646 	error = iwm_mvm_power_update_device(sc);
4647 	if (error)
4648 		goto error;
4649 
4650 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4651 		goto error;
4652 
4653 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4654 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4655 			goto error;
4656 	}
4657 
4658 	/* Enable Tx queues. */
4659 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4660 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4661 		    iwm_mvm_ac_to_tx_fifo[ac]);
4662 		if (error)
4663 			goto error;
4664 	}
4665 
4666 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4667 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4668 		goto error;
4669 	}
4670 
4671 	return 0;
4672 
4673  error:
4674 	iwm_stop_device(sc);
4675 	return error;
4676 }
4677 
4678 /* Allow multicast from our BSSID. */
4679 static int
4680 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4681 {
4682 	struct ieee80211_node *ni = vap->iv_bss;
4683 	struct iwm_mcast_filter_cmd *cmd;
4684 	size_t size;
4685 	int error;
4686 
4687 	size = roundup(sizeof(*cmd), 4);
4688 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4689 	if (cmd == NULL)
4690 		return ENOMEM;
4691 	cmd->filter_own = 1;
4692 	cmd->port_id = 0;
4693 	cmd->count = 0;
4694 	cmd->pass_all = 1;
4695 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4696 
4697 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4698 	    IWM_CMD_SYNC, size, cmd);
4699 	free(cmd, M_DEVBUF);
4700 
4701 	return (error);
4702 }
4703 
4704 /*
4705  * ifnet interfaces
4706  */
4707 
4708 static void
4709 iwm_init(struct iwm_softc *sc)
4710 {
4711 	int error;
4712 
4713 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4714 		return;
4715 	}
4716 	sc->sc_generation++;
4717 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4718 
4719 	if ((error = iwm_init_hw(sc)) != 0) {
4720 		printf("iwm_init_hw failed %d\n", error);
4721 		iwm_stop(sc);
4722 		return;
4723 	}
4724 
4725 	/*
4726 	 * Ok, firmware loaded and we are jogging
4727 	 */
4728 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4729 }
4730 
4731 static int
4732 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4733 {
4734 	struct iwm_softc *sc;
4735 	int error;
4736 
4737 	sc = ic->ic_softc;
4738 
4739 	IWM_LOCK(sc);
4740 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4741 		IWM_UNLOCK(sc);
4742 		return (ENXIO);
4743 	}
4744 	error = mbufq_enqueue(&sc->sc_snd, m);
4745 	if (error) {
4746 		IWM_UNLOCK(sc);
4747 		return (error);
4748 	}
4749 	iwm_start(sc);
4750 	IWM_UNLOCK(sc);
4751 	return (0);
4752 }
4753 
4754 /*
4755  * Dequeue packets from sendq and call send.
4756  */
4757 static void
4758 iwm_start(struct iwm_softc *sc)
4759 {
4760 	struct ieee80211_node *ni;
4761 	struct mbuf *m;
4762 	int ac = 0;
4763 
4764 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4765 	while (sc->qfullmsk == 0 &&
4766 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4767 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4768 		if (iwm_tx(sc, m, ni, ac) != 0) {
4769 			if_inc_counter(ni->ni_vap->iv_ifp,
4770 			    IFCOUNTER_OERRORS, 1);
4771 			ieee80211_free_node(ni);
4772 			continue;
4773 		}
4774 		if (sc->sc_tx_timer == 0) {
4775 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4776 			    sc);
4777 		}
4778 		sc->sc_tx_timer = 15;
4779 	}
4780 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4781 }
4782 
4783 static void
4784 iwm_stop(struct iwm_softc *sc)
4785 {
4786 
4787 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4788 	sc->sc_flags |= IWM_FLAG_STOPPED;
4789 	sc->sc_generation++;
4790 	iwm_led_blink_stop(sc);
4791 	sc->sc_tx_timer = 0;
4792 	iwm_stop_device(sc);
4793 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4794 }
4795 
4796 static void
4797 iwm_watchdog(void *arg)
4798 {
4799 	struct iwm_softc *sc = arg;
4800 	struct ieee80211com *ic = &sc->sc_ic;
4801 
4802 	if (sc->sc_attached == 0)
4803 		return;
4804 
4805 	if (sc->sc_tx_timer > 0) {
4806 		if (--sc->sc_tx_timer == 0) {
4807 			device_printf(sc->sc_dev, "device timeout\n");
4808 #ifdef IWM_DEBUG
4809 			iwm_nic_error(sc);
4810 #endif
4811 			ieee80211_restart_all(ic);
4812 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4813 			return;
4814 		}
4815 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4816 	}
4817 }
4818 
4819 static void
4820 iwm_parent(struct ieee80211com *ic)
4821 {
4822 	struct iwm_softc *sc = ic->ic_softc;
4823 	int startall = 0;
4824 
4825 	IWM_LOCK(sc);
4826 	if (ic->ic_nrunning > 0) {
4827 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4828 			iwm_init(sc);
4829 			startall = 1;
4830 		}
4831 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4832 		iwm_stop(sc);
4833 	IWM_UNLOCK(sc);
4834 	if (startall)
4835 		ieee80211_start_all(ic);
4836 }
4837 
4838 /*
4839  * The interrupt side of things
4840  */
4841 
4842 /*
4843  * error dumping routines are from iwlwifi/mvm/utils.c
4844  */
4845 
4846 /*
4847  * Note: This structure is read from the device with IO accesses,
4848  * and the reading already does the endian conversion. As it is
4849  * read with uint32_t-sized accesses, any members with a different size
4850  * need to be ordered correctly though!
4851  */
4852 struct iwm_error_event_table {
4853 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4854 	uint32_t error_id;		/* type of error */
4855 	uint32_t trm_hw_status0;	/* TRM HW status */
4856 	uint32_t trm_hw_status1;	/* TRM HW status */
4857 	uint32_t blink2;		/* branch link */
4858 	uint32_t ilink1;		/* interrupt link */
4859 	uint32_t ilink2;		/* interrupt link */
4860 	uint32_t data1;		/* error-specific data */
4861 	uint32_t data2;		/* error-specific data */
4862 	uint32_t data3;		/* error-specific data */
4863 	uint32_t bcon_time;		/* beacon timer */
4864 	uint32_t tsf_low;		/* network timestamp function timer */
4865 	uint32_t tsf_hi;		/* network timestamp function timer */
4866 	uint32_t gp1;		/* GP1 timer register */
4867 	uint32_t gp2;		/* GP2 timer register */
4868 	uint32_t fw_rev_type;	/* firmware revision type */
4869 	uint32_t major;		/* uCode version major */
4870 	uint32_t minor;		/* uCode version minor */
4871 	uint32_t hw_ver;		/* HW Silicon version */
4872 	uint32_t brd_ver;		/* HW board version */
4873 	uint32_t log_pc;		/* log program counter */
4874 	uint32_t frame_ptr;		/* frame pointer */
4875 	uint32_t stack_ptr;		/* stack pointer */
4876 	uint32_t hcmd;		/* last host command header */
4877 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4878 				 * rxtx_flag */
4879 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4880 				 * host_flag */
4881 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4882 				 * enc_flag */
4883 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4884 				 * time_flag */
4885 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4886 				 * wico interrupt */
4887 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4888 	uint32_t wait_event;		/* wait event() caller address */
4889 	uint32_t l2p_control;	/* L2pControlField */
4890 	uint32_t l2p_duration;	/* L2pDurationField */
4891 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4892 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4893 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4894 				 * (LMPM_PMG_SEL) */
4895 	uint32_t u_timestamp;	/* indicate when the date and time of the
4896 				 * compilation */
4897 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4898 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4899 
4900 /*
4901  * UMAC error struct - relevant starting from family 8000 chip.
4902  * Note: This structure is read from the device with IO accesses,
4903  * and the reading already does the endian conversion. As it is
4904  * read with u32-sized accesses, any members with a different size
4905  * need to be ordered correctly though!
4906  */
4907 struct iwm_umac_error_event_table {
4908 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4909 	uint32_t error_id;	/* type of error */
4910 	uint32_t blink1;	/* branch link */
4911 	uint32_t blink2;	/* branch link */
4912 	uint32_t ilink1;	/* interrupt link */
4913 	uint32_t ilink2;	/* interrupt link */
4914 	uint32_t data1;		/* error-specific data */
4915 	uint32_t data2;		/* error-specific data */
4916 	uint32_t data3;		/* error-specific data */
4917 	uint32_t umac_major;
4918 	uint32_t umac_minor;
4919 	uint32_t frame_pointer;	/* core register 27*/
4920 	uint32_t stack_pointer;	/* core register 28 */
4921 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4922 	uint32_t nic_isr_pref;	/* ISR status register */
4923 } __packed;
4924 
4925 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4926 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4927 
4928 #ifdef IWM_DEBUG
4929 struct {
4930 	const char *name;
4931 	uint8_t num;
4932 } advanced_lookup[] = {
4933 	{ "NMI_INTERRUPT_WDG", 0x34 },
4934 	{ "SYSASSERT", 0x35 },
4935 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4936 	{ "BAD_COMMAND", 0x38 },
4937 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4938 	{ "FATAL_ERROR", 0x3D },
4939 	{ "NMI_TRM_HW_ERR", 0x46 },
4940 	{ "NMI_INTERRUPT_TRM", 0x4C },
4941 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4942 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4943 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4944 	{ "NMI_INTERRUPT_HOST", 0x66 },
4945 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4946 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4947 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4948 	{ "ADVANCED_SYSASSERT", 0 },
4949 };
4950 
4951 static const char *
4952 iwm_desc_lookup(uint32_t num)
4953 {
4954 	int i;
4955 
4956 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4957 		if (advanced_lookup[i].num == num)
4958 			return advanced_lookup[i].name;
4959 
4960 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4961 	return advanced_lookup[i].name;
4962 }
4963 
4964 static void
4965 iwm_nic_umac_error(struct iwm_softc *sc)
4966 {
4967 	struct iwm_umac_error_event_table table;
4968 	uint32_t base;
4969 
4970 	base = sc->umac_error_event_table;
4971 
4972 	if (base < 0x800000) {
4973 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4974 		    base);
4975 		return;
4976 	}
4977 
4978 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4979 		device_printf(sc->sc_dev, "reading errlog failed\n");
4980 		return;
4981 	}
4982 
4983 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4984 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4985 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4986 		    sc->sc_flags, table.valid);
4987 	}
4988 
4989 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4990 		iwm_desc_lookup(table.error_id));
4991 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4992 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4993 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4994 	    table.ilink1);
4995 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4996 	    table.ilink2);
4997 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4998 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4999 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5000 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5001 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5002 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5003 	    table.frame_pointer);
5004 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5005 	    table.stack_pointer);
5006 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5007 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5008 	    table.nic_isr_pref);
5009 }
5010 
5011 /*
5012  * Support for dumping the error log seemed like a good idea ...
5013  * but it's mostly hex junk and the only sensible thing is the
5014  * hw/ucode revision (which we know anyway).  Since it's here,
5015  * I'll just leave it in, just in case e.g. the Intel guys want to
5016  * help us decipher some "ADVANCED_SYSASSERT" later.
5017  */
5018 static void
5019 iwm_nic_error(struct iwm_softc *sc)
5020 {
5021 	struct iwm_error_event_table table;
5022 	uint32_t base;
5023 
5024 	device_printf(sc->sc_dev, "dumping device error log\n");
5025 	base = sc->error_event_table[0];
5026 	if (base < 0x800000) {
5027 		device_printf(sc->sc_dev,
5028 		    "Invalid error log pointer 0x%08x\n", base);
5029 		return;
5030 	}
5031 
5032 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5033 		device_printf(sc->sc_dev, "reading errlog failed\n");
5034 		return;
5035 	}
5036 
5037 	if (!table.valid) {
5038 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5039 		return;
5040 	}
5041 
5042 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5043 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5044 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5045 		    sc->sc_flags, table.valid);
5046 	}
5047 
5048 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5049 	    iwm_desc_lookup(table.error_id));
5050 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5051 	    table.trm_hw_status0);
5052 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5053 	    table.trm_hw_status1);
5054 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5055 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5056 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5057 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5058 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5059 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5060 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5061 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5062 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5063 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5064 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5065 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5066 	    table.fw_rev_type);
5067 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5068 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5069 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5070 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5071 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5072 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5073 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5074 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5075 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5076 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5077 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5078 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5079 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5080 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5081 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5082 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5083 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5084 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5085 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5086 
5087 	if (sc->umac_error_event_table)
5088 		iwm_nic_umac_error(sc);
5089 }
5090 #endif
5091 
5092 static void
5093 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5094 {
5095 	struct ieee80211com *ic = &sc->sc_ic;
5096 	struct iwm_cmd_response *cresp;
5097 	struct mbuf *m1;
5098 	uint32_t offset = 0;
5099 	uint32_t maxoff = IWM_RBUF_SIZE;
5100 	uint32_t nextoff;
5101 	boolean_t stolen = FALSE;
5102 
5103 #define HAVEROOM(a)	\
5104     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5105 
5106 	while (HAVEROOM(offset)) {
5107 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5108 		    offset);
5109 		int qid, idx, code, len;
5110 
5111 		qid = pkt->hdr.qid;
5112 		idx = pkt->hdr.idx;
5113 
5114 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5115 
5116 		/*
5117 		 * randomly get these from the firmware, no idea why.
5118 		 * they at least seem harmless, so just ignore them for now
5119 		 */
5120 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5121 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5122 			break;
5123 		}
5124 
5125 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5126 		    "rx packet qid=%d idx=%d type=%x\n",
5127 		    qid & ~0x80, pkt->hdr.idx, code);
5128 
5129 		len = iwm_rx_packet_len(pkt);
5130 		len += sizeof(uint32_t); /* account for status word */
5131 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5132 
5133 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5134 
5135 		switch (code) {
5136 		case IWM_REPLY_RX_PHY_CMD:
5137 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5138 			break;
5139 
5140 		case IWM_REPLY_RX_MPDU_CMD: {
5141 			/*
5142 			 * If this is the last frame in the RX buffer, we
5143 			 * can directly feed the mbuf to the sharks here.
5144 			 */
5145 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5146 			    struct iwm_rx_packet *, nextoff);
5147 			if (!HAVEROOM(nextoff) ||
5148 			    (nextpkt->hdr.code == 0 &&
5149 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5150 			     nextpkt->hdr.idx == 0) ||
5151 			    (nextpkt->len_n_flags ==
5152 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5153 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5154 					stolen = FALSE;
5155 					/* Make sure we abort the loop */
5156 					nextoff = maxoff;
5157 				}
5158 				break;
5159 			}
5160 
5161 			/*
5162 			 * Use m_copym instead of m_split, because that
5163 			 * makes it easier to keep a valid rx buffer in
5164 			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5165 			 *
5166 			 * We need to start m_copym() at offset 0, to get the
5167 			 * M_PKTHDR flag preserved.
5168 			 */
5169 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5170 			if (m1) {
5171 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5172 					stolen = TRUE;
5173 				else
5174 					m_freem(m1);
5175 			}
5176 			break;
5177 		}
5178 
5179 		case IWM_TX_CMD:
5180 			iwm_mvm_rx_tx_cmd(sc, pkt);
5181 			break;
5182 
5183 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5184 			struct iwm_missed_beacons_notif *resp;
5185 			int missed;
5186 
5187 			/* XXX look at mac_id to determine interface ID */
5188 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5189 
5190 			resp = (void *)pkt->data;
5191 			missed = le32toh(resp->consec_missed_beacons);
5192 
5193 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5194 			    "%s: MISSED_BEACON: mac_id=%d, "
5195 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5196 			    "num_rx=%d\n",
5197 			    __func__,
5198 			    le32toh(resp->mac_id),
5199 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5200 			    le32toh(resp->consec_missed_beacons),
5201 			    le32toh(resp->num_expected_beacons),
5202 			    le32toh(resp->num_recvd_beacons));
5203 
5204 			/* Be paranoid */
5205 			if (vap == NULL)
5206 				break;
5207 
5208 			/* XXX no net80211 locking? */
5209 			if (vap->iv_state == IEEE80211_S_RUN &&
5210 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5211 				if (missed > vap->iv_bmissthreshold) {
5212 					/* XXX bad locking; turn into task */
5213 					IWM_UNLOCK(sc);
5214 					ieee80211_beacon_miss(ic);
5215 					IWM_LOCK(sc);
5216 				}
5217 			}
5218 
5219 			break;
5220 		}
5221 
5222 		case IWM_MFUART_LOAD_NOTIFICATION:
5223 			break;
5224 
5225 		case IWM_MVM_ALIVE:
5226 			break;
5227 
5228 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5229 			break;
5230 
5231 		case IWM_STATISTICS_NOTIFICATION:
5232 			iwm_mvm_handle_rx_statistics(sc, pkt);
5233 			break;
5234 
5235 		case IWM_NVM_ACCESS_CMD:
5236 		case IWM_MCC_UPDATE_CMD:
5237 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5238 				memcpy(sc->sc_cmd_resp,
5239 				    pkt, sizeof(sc->sc_cmd_resp));
5240 			}
5241 			break;
5242 
5243 		case IWM_MCC_CHUB_UPDATE_CMD: {
5244 			struct iwm_mcc_chub_notif *notif;
5245 			notif = (void *)pkt->data;
5246 
5247 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5248 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5249 			sc->sc_fw_mcc[2] = '\0';
5250 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5251 			    "fw source %d sent CC '%s'\n",
5252 			    notif->source_id, sc->sc_fw_mcc);
5253 			break;
5254 		}
5255 
5256 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5257 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5258 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5259 			struct iwm_dts_measurement_notif_v1 *notif;
5260 
5261 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5262 				device_printf(sc->sc_dev,
5263 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5264 				break;
5265 			}
5266 			notif = (void *)pkt->data;
5267 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5268 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5269 			    notif->temp);
5270 			break;
5271 		}
5272 
5273 		case IWM_PHY_CONFIGURATION_CMD:
5274 		case IWM_TX_ANT_CONFIGURATION_CMD:
5275 		case IWM_ADD_STA:
5276 		case IWM_MAC_CONTEXT_CMD:
5277 		case IWM_REPLY_SF_CFG_CMD:
5278 		case IWM_POWER_TABLE_CMD:
5279 		case IWM_LTR_CONFIG:
5280 		case IWM_PHY_CONTEXT_CMD:
5281 		case IWM_BINDING_CONTEXT_CMD:
5282 		case IWM_TIME_EVENT_CMD:
5283 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5284 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5285 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5286 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5287 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5288 		case IWM_REPLY_BEACON_FILTERING_CMD:
5289 		case IWM_MAC_PM_POWER_TABLE:
5290 		case IWM_TIME_QUOTA_CMD:
5291 		case IWM_REMOVE_STA:
5292 		case IWM_TXPATH_FLUSH:
5293 		case IWM_LQ_CMD:
5294 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5295 				 IWM_FW_PAGING_BLOCK_CMD):
5296 		case IWM_BT_CONFIG:
5297 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5298 			cresp = (void *)pkt->data;
5299 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5300 				memcpy(sc->sc_cmd_resp,
5301 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5302 			}
5303 			break;
5304 
5305 		/* ignore */
5306 		case IWM_PHY_DB_CMD:
5307 			break;
5308 
5309 		case IWM_INIT_COMPLETE_NOTIF:
5310 			break;
5311 
5312 		case IWM_SCAN_OFFLOAD_COMPLETE:
5313 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5314 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5315 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5316 				ieee80211_runtask(ic, &sc->sc_es_task);
5317 			}
5318 			break;
5319 
5320 		case IWM_SCAN_ITERATION_COMPLETE: {
5321 			struct iwm_lmac_scan_complete_notif *notif;
5322 			notif = (void *)pkt->data;
5323 			break;
5324 		}
5325 
5326 		case IWM_SCAN_COMPLETE_UMAC:
5327 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5328 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5329 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5330 				ieee80211_runtask(ic, &sc->sc_es_task);
5331 			}
5332 			break;
5333 
5334 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5335 			struct iwm_umac_scan_iter_complete_notif *notif;
5336 			notif = (void *)pkt->data;
5337 
5338 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5339 			    "complete, status=0x%x, %d channels scanned\n",
5340 			    notif->status, notif->scanned_channels);
5341 			break;
5342 		}
5343 
5344 		case IWM_REPLY_ERROR: {
5345 			struct iwm_error_resp *resp;
5346 			resp = (void *)pkt->data;
5347 
5348 			device_printf(sc->sc_dev,
5349 			    "firmware error 0x%x, cmd 0x%x\n",
5350 			    le32toh(resp->error_type),
5351 			    resp->cmd_id);
5352 			break;
5353 		}
5354 
5355 		case IWM_TIME_EVENT_NOTIFICATION:
5356 			iwm_mvm_rx_time_event_notif(sc, pkt);
5357 			break;
5358 
5359 		/*
5360 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5361 		 * messages. Just ignore them for now.
5362 		 */
5363 		case IWM_DEBUG_LOG_MSG:
5364 			break;
5365 
5366 		case IWM_MCAST_FILTER_CMD:
5367 			break;
5368 
5369 		case IWM_SCD_QUEUE_CFG: {
5370 			struct iwm_scd_txq_cfg_rsp *rsp;
5371 			rsp = (void *)pkt->data;
5372 
5373 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5374 			    "queue cfg token=0x%x sta_id=%d "
5375 			    "tid=%d scd_queue=%d\n",
5376 			    rsp->token, rsp->sta_id, rsp->tid,
5377 			    rsp->scd_queue);
5378 			break;
5379 		}
5380 
5381 		default:
5382 			device_printf(sc->sc_dev,
5383 			    "frame %d/%d %x UNHANDLED (this should "
5384 			    "not happen)\n", qid & ~0x80, idx,
5385 			    pkt->len_n_flags);
5386 			break;
5387 		}
5388 
5389 		/*
5390 		 * Why test bit 0x80?  The Linux driver:
5391 		 *
5392 		 * There is one exception:  uCode sets bit 15 when it
5393 		 * originates the response/notification, i.e. when the
5394 		 * response/notification is not a direct response to a
5395 		 * command sent by the driver.  For example, uCode issues
5396 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5397 		 * it is not a direct response to any driver command.
5398 		 *
5399 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5400 		 * uses a slightly different format for pkt->hdr, and "qid"
5401 		 * is actually the upper byte of a two-byte field.
5402 		 */
5403 		if (!(qid & (1 << 7)))
5404 			iwm_cmd_done(sc, pkt);
5405 
5406 		offset = nextoff;
5407 	}
5408 	if (stolen)
5409 		m_freem(m);
5410 #undef HAVEROOM
5411 }
5412 
5413 /*
5414  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5415  * Basic structure from if_iwn
5416  */
5417 static void
5418 iwm_notif_intr(struct iwm_softc *sc)
5419 {
5420 	uint16_t hw;
5421 
5422 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5423 	    BUS_DMASYNC_POSTREAD);
5424 
5425 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5426 
5427 	/*
5428 	 * Process responses
5429 	 */
5430 	while (sc->rxq.cur != hw) {
5431 		struct iwm_rx_ring *ring = &sc->rxq;
5432 		struct iwm_rx_data *data = &ring->data[ring->cur];
5433 
5434 		bus_dmamap_sync(ring->data_dmat, data->map,
5435 		    BUS_DMASYNC_POSTREAD);
5436 
5437 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5438 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5439 		iwm_handle_rxb(sc, data->m);
5440 
5441 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5442 	}
5443 
5444 	/*
5445 	 * Tell the firmware that it can reuse the ring entries that
5446 	 * we have just processed.
5447 	 * Seems like the hardware gets upset unless we align
5448 	 * the write by 8??
5449 	 */
5450 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5451 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5452 }
5453 
5454 static void
5455 iwm_intr(void *arg)
5456 {
5457 	struct iwm_softc *sc = arg;
5458 	int handled = 0;
5459 	int r1, r2, rv = 0;
5460 	int isperiodic = 0;
5461 
5462 	IWM_LOCK(sc);
5463 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5464 
5465 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5466 		uint32_t *ict = sc->ict_dma.vaddr;
5467 		int tmp;
5468 
5469 		tmp = htole32(ict[sc->ict_cur]);
5470 		if (!tmp)
5471 			goto out_ena;
5472 
5473 		/*
5474 		 * ok, there was something.  keep plowing until we have all.
5475 		 */
5476 		r1 = r2 = 0;
5477 		while (tmp) {
5478 			r1 |= tmp;
5479 			ict[sc->ict_cur] = 0;
5480 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5481 			tmp = htole32(ict[sc->ict_cur]);
5482 		}
5483 
5484 		/* this is where the fun begins.  don't ask */
5485 		if (r1 == 0xffffffff)
5486 			r1 = 0;
5487 
5488 		/* i am not expected to understand this */
5489 		if (r1 & 0xc0000)
5490 			r1 |= 0x8000;
5491 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5492 	} else {
5493 		r1 = IWM_READ(sc, IWM_CSR_INT);
5494 		/* "hardware gone" (where, fishing?) */
5495 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5496 			goto out;
5497 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5498 	}
5499 	if (r1 == 0 && r2 == 0) {
5500 		goto out_ena;
5501 	}
5502 
5503 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5504 
5505 	/* Safely ignore these bits for debug checks below */
5506 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5507 
5508 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5509 		int i;
5510 		struct ieee80211com *ic = &sc->sc_ic;
5511 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5512 
5513 #ifdef IWM_DEBUG
5514 		iwm_nic_error(sc);
5515 #endif
5516 		/* Dump driver status (TX and RX rings) while we're here. */
5517 		device_printf(sc->sc_dev, "driver status:\n");
5518 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5519 			struct iwm_tx_ring *ring = &sc->txq[i];
5520 			device_printf(sc->sc_dev,
5521 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5522 			    "queued=%-3d\n",
5523 			    i, ring->qid, ring->cur, ring->queued);
5524 		}
5525 		device_printf(sc->sc_dev,
5526 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5527 		device_printf(sc->sc_dev,
5528 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5529 
5530 		/* Reset our firmware state tracking. */
5531 		sc->sc_firmware_state = 0;
5532 		/* Don't stop the device; just do a VAP restart */
5533 		IWM_UNLOCK(sc);
5534 
5535 		if (vap == NULL) {
5536 			printf("%s: null vap\n", __func__);
5537 			return;
5538 		}
5539 
5540 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5541 		    "restarting\n", __func__, vap->iv_state);
5542 
5543 		ieee80211_restart_all(ic);
5544 		return;
5545 	}
5546 
5547 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5548 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5549 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5550 		iwm_stop(sc);
5551 		rv = 1;
5552 		goto out;
5553 	}
5554 
5555 	/* firmware chunk loaded */
5556 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5557 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5558 		handled |= IWM_CSR_INT_BIT_FH_TX;
5559 		sc->sc_fw_chunk_done = 1;
5560 		wakeup(&sc->sc_fw);
5561 	}
5562 
5563 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5564 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5565 		if (iwm_check_rfkill(sc)) {
5566 			device_printf(sc->sc_dev,
5567 			    "%s: rfkill switch, disabling interface\n",
5568 			    __func__);
5569 			iwm_stop(sc);
5570 		}
5571 	}
5572 
5573 	/*
5574 	 * The Linux driver uses periodic interrupts to avoid races.
5575 	 * We cargo-cult like it's going out of fashion.
5576 	 */
5577 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5578 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5579 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5580 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5581 			IWM_WRITE_1(sc,
5582 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5583 		isperiodic = 1;
5584 	}
5585 
5586 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5587 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5588 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5589 
5590 		iwm_notif_intr(sc);
5591 
5592 		/* enable periodic interrupt, see above */
5593 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5594 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5595 			    IWM_CSR_INT_PERIODIC_ENA);
5596 	}
5597 
5598 	if (__predict_false(r1 & ~handled))
5599 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5600 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5601 	rv = 1;
5602 
5603  out_ena:
5604 	iwm_restore_interrupts(sc);
5605  out:
5606 	IWM_UNLOCK(sc);
5607 	return;
5608 }
5609 
5610 /*
5611  * Autoconf glue-sniffing
5612  */
5613 #define	PCI_VENDOR_INTEL		0x8086
5614 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5615 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5616 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5617 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5618 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5619 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5620 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5621 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5622 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5623 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5624 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5625 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5626 
5627 static const struct iwm_devices {
5628 	uint16_t		device;
5629 	const struct iwm_cfg	*cfg;
5630 } iwm_devices[] = {
5631 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5632 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5633 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5634 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5635 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5636 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5637 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5638 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5639 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5640 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5641 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5642 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5643 };
5644 
5645 static int
5646 iwm_probe(device_t dev)
5647 {
5648 	int i;
5649 
5650 	for (i = 0; i < nitems(iwm_devices); i++) {
5651 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5652 		    pci_get_device(dev) == iwm_devices[i].device) {
5653 			device_set_desc(dev, iwm_devices[i].cfg->name);
5654 			return (BUS_PROBE_DEFAULT);
5655 		}
5656 	}
5657 
5658 	return (ENXIO);
5659 }
5660 
5661 static int
5662 iwm_dev_check(device_t dev)
5663 {
5664 	struct iwm_softc *sc;
5665 	uint16_t devid;
5666 	int i;
5667 
5668 	sc = device_get_softc(dev);
5669 
5670 	devid = pci_get_device(dev);
5671 	for (i = 0; i < nitems(iwm_devices); i++) {
5672 		if (iwm_devices[i].device == devid) {
5673 			sc->cfg = iwm_devices[i].cfg;
5674 			return (0);
5675 		}
5676 	}
5677 	device_printf(dev, "unknown adapter type\n");
5678 	return ENXIO;
5679 }
5680 
5681 /* PCI registers */
5682 #define PCI_CFG_RETRY_TIMEOUT	0x041
5683 
5684 static int
5685 iwm_pci_attach(device_t dev)
5686 {
5687 	struct iwm_softc *sc;
5688 	int count, error, rid;
5689 	uint16_t reg;
5690 
5691 	sc = device_get_softc(dev);
5692 
5693 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5694 	 * PCI Tx retries from interfering with C3 CPU state */
5695 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5696 
5697 	/* Enable bus-mastering and hardware bug workaround. */
5698 	pci_enable_busmaster(dev);
5699 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5700 	/* if !MSI */
5701 	if (reg & PCIM_STATUS_INTxSTATE) {
5702 		reg &= ~PCIM_STATUS_INTxSTATE;
5703 	}
5704 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5705 
5706 	rid = PCIR_BAR(0);
5707 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5708 	    RF_ACTIVE);
5709 	if (sc->sc_mem == NULL) {
5710 		device_printf(sc->sc_dev, "can't map mem space\n");
5711 		return (ENXIO);
5712 	}
5713 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5714 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5715 
5716 	/* Install interrupt handler. */
5717 	count = 1;
5718 	rid = 0;
5719 	if (pci_alloc_msi(dev, &count) == 0)
5720 		rid = 1;
5721 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5722 	    (rid != 0 ? 0 : RF_SHAREABLE));
5723 	if (sc->sc_irq == NULL) {
5724 		device_printf(dev, "can't map interrupt\n");
5725 			return (ENXIO);
5726 	}
5727 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5728 	    NULL, iwm_intr, sc, &sc->sc_ih);
5729 	if (sc->sc_ih == NULL) {
5730 		device_printf(dev, "can't establish interrupt");
5731 			return (ENXIO);
5732 	}
5733 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5734 
5735 	return (0);
5736 }
5737 
5738 static void
5739 iwm_pci_detach(device_t dev)
5740 {
5741 	struct iwm_softc *sc = device_get_softc(dev);
5742 
5743 	if (sc->sc_irq != NULL) {
5744 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5745 		bus_release_resource(dev, SYS_RES_IRQ,
5746 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5747 		pci_release_msi(dev);
5748         }
5749 	if (sc->sc_mem != NULL)
5750 		bus_release_resource(dev, SYS_RES_MEMORY,
5751 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5752 }
5753 
5754 static int
5755 iwm_attach(device_t dev)
5756 {
5757 	struct iwm_softc *sc = device_get_softc(dev);
5758 	struct ieee80211com *ic = &sc->sc_ic;
5759 	int error;
5760 	int txq_i, i;
5761 
5762 	sc->sc_dev = dev;
5763 	sc->sc_attached = 1;
5764 	IWM_LOCK_INIT(sc);
5765 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5766 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5767 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5768 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5769 
5770 	error = iwm_dev_check(dev);
5771 	if (error != 0)
5772 		goto fail;
5773 
5774 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5775 	if (sc->sc_notif_wait == NULL) {
5776 		device_printf(dev, "failed to init notification wait struct\n");
5777 		goto fail;
5778 	}
5779 
5780 	sc->sf_state = IWM_SF_UNINIT;
5781 
5782 	/* Init phy db */
5783 	sc->sc_phy_db = iwm_phy_db_init(sc);
5784 	if (!sc->sc_phy_db) {
5785 		device_printf(dev, "Cannot init phy_db\n");
5786 		goto fail;
5787 	}
5788 
5789 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5790 	sc->last_ebs_successful = TRUE;
5791 
5792 	/* PCI attach */
5793 	error = iwm_pci_attach(dev);
5794 	if (error != 0)
5795 		goto fail;
5796 
5797 	sc->sc_wantresp = -1;
5798 
5799 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5800 	/*
5801 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5802 	 * changed, and now the revision step also includes bit 0-1 (no more
5803 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5804 	 * in the old format.
5805 	 */
5806 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
5807 		int ret;
5808 		uint32_t hw_step;
5809 
5810 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5811 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5812 
5813 		if (iwm_prepare_card_hw(sc) != 0) {
5814 			device_printf(dev, "could not initialize hardware\n");
5815 			goto fail;
5816 		}
5817 
5818 		/*
5819 		 * In order to recognize C step the driver should read the
5820 		 * chip version id located at the AUX bus MISC address.
5821 		 */
5822 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5823 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5824 		DELAY(2);
5825 
5826 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5827 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5828 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5829 				   25000);
5830 		if (!ret) {
5831 			device_printf(sc->sc_dev,
5832 			    "Failed to wake up the nic\n");
5833 			goto fail;
5834 		}
5835 
5836 		if (iwm_nic_lock(sc)) {
5837 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5838 			hw_step |= IWM_ENABLE_WFPM;
5839 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5840 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5841 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5842 			if (hw_step == 0x3)
5843 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5844 						(IWM_SILICON_C_STEP << 2);
5845 			iwm_nic_unlock(sc);
5846 		} else {
5847 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5848 			goto fail;
5849 		}
5850 	}
5851 
5852 	/* special-case 7265D, it has the same PCI IDs. */
5853 	if (sc->cfg == &iwm7265_cfg &&
5854 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5855 		sc->cfg = &iwm7265d_cfg;
5856 	}
5857 
5858 	/* Allocate DMA memory for firmware transfers. */
5859 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5860 		device_printf(dev, "could not allocate memory for firmware\n");
5861 		goto fail;
5862 	}
5863 
5864 	/* Allocate "Keep Warm" page. */
5865 	if ((error = iwm_alloc_kw(sc)) != 0) {
5866 		device_printf(dev, "could not allocate keep warm page\n");
5867 		goto fail;
5868 	}
5869 
5870 	/* We use ICT interrupts */
5871 	if ((error = iwm_alloc_ict(sc)) != 0) {
5872 		device_printf(dev, "could not allocate ICT table\n");
5873 		goto fail;
5874 	}
5875 
5876 	/* Allocate TX scheduler "rings". */
5877 	if ((error = iwm_alloc_sched(sc)) != 0) {
5878 		device_printf(dev, "could not allocate TX scheduler rings\n");
5879 		goto fail;
5880 	}
5881 
5882 	/* Allocate TX rings */
5883 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5884 		if ((error = iwm_alloc_tx_ring(sc,
5885 		    &sc->txq[txq_i], txq_i)) != 0) {
5886 			device_printf(dev,
5887 			    "could not allocate TX ring %d\n",
5888 			    txq_i);
5889 			goto fail;
5890 		}
5891 	}
5892 
5893 	/* Allocate RX ring. */
5894 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5895 		device_printf(dev, "could not allocate RX ring\n");
5896 		goto fail;
5897 	}
5898 
5899 	/* Clear pending interrupts. */
5900 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5901 
5902 	ic->ic_softc = sc;
5903 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5904 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5905 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5906 
5907 	/* Set device capabilities. */
5908 	ic->ic_caps =
5909 	    IEEE80211_C_STA |
5910 	    IEEE80211_C_WPA |		/* WPA/RSN */
5911 	    IEEE80211_C_WME |
5912 	    IEEE80211_C_PMGT |
5913 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5914 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5915 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5916 	    ;
5917 	/* Advertise full-offload scanning */
5918 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
5919 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5920 		sc->sc_phyctxt[i].id = i;
5921 		sc->sc_phyctxt[i].color = 0;
5922 		sc->sc_phyctxt[i].ref = 0;
5923 		sc->sc_phyctxt[i].channel = NULL;
5924 	}
5925 
5926 	/* Default noise floor */
5927 	sc->sc_noise = -96;
5928 
5929 	/* Max RSSI */
5930 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5931 
5932 #ifdef IWM_DEBUG
5933 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5934 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5935 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5936 #endif
5937 
5938 	error = iwm_read_firmware(sc);
5939 	if (error) {
5940 		goto fail;
5941 	} else if (sc->sc_fw.fw_fp == NULL) {
5942 		/*
5943 		 * XXX Add a solution for properly deferring firmware load
5944 		 *     during bootup.
5945 		 */
5946 		goto fail;
5947 	} else {
5948 		sc->sc_preinit_hook.ich_func = iwm_preinit;
5949 		sc->sc_preinit_hook.ich_arg = sc;
5950 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5951 			device_printf(dev,
5952 			    "config_intrhook_establish failed\n");
5953 			goto fail;
5954 		}
5955 	}
5956 
5957 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5958 	    "<-%s\n", __func__);
5959 
5960 	return 0;
5961 
5962 	/* Free allocated memory if something failed during attachment. */
5963 fail:
5964 	iwm_detach_local(sc, 0);
5965 
5966 	return ENXIO;
5967 }
5968 
5969 static int
5970 iwm_is_valid_ether_addr(uint8_t *addr)
5971 {
5972 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5973 
5974 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5975 		return (FALSE);
5976 
5977 	return (TRUE);
5978 }
5979 
5980 static int
5981 iwm_wme_update(struct ieee80211com *ic)
5982 {
5983 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5984 	struct iwm_softc *sc = ic->ic_softc;
5985 	struct chanAccParams chp;
5986 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5987 	struct iwm_vap *ivp = IWM_VAP(vap);
5988 	struct iwm_node *in;
5989 	struct wmeParams tmp[WME_NUM_AC];
5990 	int aci, error;
5991 
5992 	if (vap == NULL)
5993 		return (0);
5994 
5995 	ieee80211_wme_ic_getparams(ic, &chp);
5996 
5997 	IEEE80211_LOCK(ic);
5998 	for (aci = 0; aci < WME_NUM_AC; aci++)
5999 		tmp[aci] = chp.cap_wmeParams[aci];
6000 	IEEE80211_UNLOCK(ic);
6001 
6002 	IWM_LOCK(sc);
6003 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6004 		const struct wmeParams *ac = &tmp[aci];
6005 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6006 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6007 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6008 		ivp->queue_params[aci].edca_txop =
6009 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6010 	}
6011 	ivp->have_wme = TRUE;
6012 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6013 		in = IWM_NODE(vap->iv_bss);
6014 		if (in->in_assoc) {
6015 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6016 				device_printf(sc->sc_dev,
6017 				    "%s: failed to update MAC\n", __func__);
6018 			}
6019 		}
6020 	}
6021 	IWM_UNLOCK(sc);
6022 
6023 	return (0);
6024 #undef IWM_EXP2
6025 }
6026 
6027 static void
6028 iwm_preinit(void *arg)
6029 {
6030 	struct iwm_softc *sc = arg;
6031 	device_t dev = sc->sc_dev;
6032 	struct ieee80211com *ic = &sc->sc_ic;
6033 	int error;
6034 
6035 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6036 	    "->%s\n", __func__);
6037 
6038 	IWM_LOCK(sc);
6039 	if ((error = iwm_start_hw(sc)) != 0) {
6040 		device_printf(dev, "could not initialize hardware\n");
6041 		IWM_UNLOCK(sc);
6042 		goto fail;
6043 	}
6044 
6045 	error = iwm_run_init_mvm_ucode(sc, 1);
6046 	iwm_stop_device(sc);
6047 	if (error) {
6048 		IWM_UNLOCK(sc);
6049 		goto fail;
6050 	}
6051 	device_printf(dev,
6052 	    "hw rev 0x%x, fw ver %s, address %s\n",
6053 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6054 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6055 
6056 	/* not all hardware can do 5GHz band */
6057 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6058 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6059 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6060 	IWM_UNLOCK(sc);
6061 
6062 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6063 	    ic->ic_channels);
6064 
6065 	/*
6066 	 * At this point we've committed - if we fail to do setup,
6067 	 * we now also have to tear down the net80211 state.
6068 	 */
6069 	ieee80211_ifattach(ic);
6070 	ic->ic_vap_create = iwm_vap_create;
6071 	ic->ic_vap_delete = iwm_vap_delete;
6072 	ic->ic_raw_xmit = iwm_raw_xmit;
6073 	ic->ic_node_alloc = iwm_node_alloc;
6074 	ic->ic_scan_start = iwm_scan_start;
6075 	ic->ic_scan_end = iwm_scan_end;
6076 	ic->ic_update_mcast = iwm_update_mcast;
6077 	ic->ic_getradiocaps = iwm_init_channel_map;
6078 	ic->ic_set_channel = iwm_set_channel;
6079 	ic->ic_scan_curchan = iwm_scan_curchan;
6080 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6081 	ic->ic_wme.wme_update = iwm_wme_update;
6082 	ic->ic_parent = iwm_parent;
6083 	ic->ic_transmit = iwm_transmit;
6084 	iwm_radiotap_attach(sc);
6085 	if (bootverbose)
6086 		ieee80211_announce(ic);
6087 
6088 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6089 	    "<-%s\n", __func__);
6090 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6091 
6092 	return;
6093 fail:
6094 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6095 	iwm_detach_local(sc, 0);
6096 }
6097 
6098 /*
6099  * Attach the interface to 802.11 radiotap.
6100  */
6101 static void
6102 iwm_radiotap_attach(struct iwm_softc *sc)
6103 {
6104         struct ieee80211com *ic = &sc->sc_ic;
6105 
6106 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6107 	    "->%s begin\n", __func__);
6108         ieee80211_radiotap_attach(ic,
6109             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6110                 IWM_TX_RADIOTAP_PRESENT,
6111             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6112                 IWM_RX_RADIOTAP_PRESENT);
6113 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6114 	    "->%s end\n", __func__);
6115 }
6116 
6117 static struct ieee80211vap *
6118 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6119     enum ieee80211_opmode opmode, int flags,
6120     const uint8_t bssid[IEEE80211_ADDR_LEN],
6121     const uint8_t mac[IEEE80211_ADDR_LEN])
6122 {
6123 	struct iwm_vap *ivp;
6124 	struct ieee80211vap *vap;
6125 
6126 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6127 		return NULL;
6128 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6129 	vap = &ivp->iv_vap;
6130 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6131 	vap->iv_bmissthreshold = 10;            /* override default */
6132 	/* Override with driver methods. */
6133 	ivp->iv_newstate = vap->iv_newstate;
6134 	vap->iv_newstate = iwm_newstate;
6135 
6136 	ivp->id = IWM_DEFAULT_MACID;
6137 	ivp->color = IWM_DEFAULT_COLOR;
6138 
6139 	ivp->have_wme = FALSE;
6140 	ivp->ps_disabled = FALSE;
6141 
6142 	ieee80211_ratectl_init(vap);
6143 	/* Complete setup. */
6144 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6145 	    mac);
6146 	ic->ic_opmode = opmode;
6147 
6148 	return vap;
6149 }
6150 
6151 static void
6152 iwm_vap_delete(struct ieee80211vap *vap)
6153 {
6154 	struct iwm_vap *ivp = IWM_VAP(vap);
6155 
6156 	ieee80211_ratectl_deinit(vap);
6157 	ieee80211_vap_detach(vap);
6158 	free(ivp, M_80211_VAP);
6159 }
6160 
6161 static void
6162 iwm_xmit_queue_drain(struct iwm_softc *sc)
6163 {
6164 	struct mbuf *m;
6165 	struct ieee80211_node *ni;
6166 
6167 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6168 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6169 		ieee80211_free_node(ni);
6170 		m_freem(m);
6171 	}
6172 }
6173 
6174 static void
6175 iwm_scan_start(struct ieee80211com *ic)
6176 {
6177 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6178 	struct iwm_softc *sc = ic->ic_softc;
6179 	int error;
6180 
6181 	IWM_LOCK(sc);
6182 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6183 		/* This should not be possible */
6184 		device_printf(sc->sc_dev,
6185 		    "%s: Previous scan not completed yet\n", __func__);
6186 	}
6187 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6188 		error = iwm_mvm_umac_scan(sc);
6189 	else
6190 		error = iwm_mvm_lmac_scan(sc);
6191 	if (error != 0) {
6192 		device_printf(sc->sc_dev, "could not initiate scan\n");
6193 		IWM_UNLOCK(sc);
6194 		ieee80211_cancel_scan(vap);
6195 	} else {
6196 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6197 		iwm_led_blink_start(sc);
6198 		IWM_UNLOCK(sc);
6199 	}
6200 }
6201 
6202 static void
6203 iwm_scan_end(struct ieee80211com *ic)
6204 {
6205 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6206 	struct iwm_softc *sc = ic->ic_softc;
6207 
6208 	IWM_LOCK(sc);
6209 	iwm_led_blink_stop(sc);
6210 	if (vap->iv_state == IEEE80211_S_RUN)
6211 		iwm_mvm_led_enable(sc);
6212 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6213 		/*
6214 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6215 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6216 		 * taskqueue.
6217 		 */
6218 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6219 		iwm_mvm_scan_stop_wait(sc);
6220 	}
6221 	IWM_UNLOCK(sc);
6222 
6223 	/*
6224 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6225 	 * This is to make sure that it won't call ieee80211_scan_done
6226 	 * when we have already started the next scan.
6227 	 */
6228 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6229 }
6230 
6231 static void
6232 iwm_update_mcast(struct ieee80211com *ic)
6233 {
6234 }
6235 
6236 static void
6237 iwm_set_channel(struct ieee80211com *ic)
6238 {
6239 }
6240 
6241 static void
6242 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6243 {
6244 }
6245 
6246 static void
6247 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6248 {
6249 	return;
6250 }
6251 
6252 void
6253 iwm_init_task(void *arg1)
6254 {
6255 	struct iwm_softc *sc = arg1;
6256 
6257 	IWM_LOCK(sc);
6258 	while (sc->sc_flags & IWM_FLAG_BUSY)
6259 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6260 	sc->sc_flags |= IWM_FLAG_BUSY;
6261 	iwm_stop(sc);
6262 	if (sc->sc_ic.ic_nrunning > 0)
6263 		iwm_init(sc);
6264 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6265 	wakeup(&sc->sc_flags);
6266 	IWM_UNLOCK(sc);
6267 }
6268 
6269 static int
6270 iwm_resume(device_t dev)
6271 {
6272 	struct iwm_softc *sc = device_get_softc(dev);
6273 	int do_reinit = 0;
6274 
6275 	/*
6276 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6277 	 * PCI Tx retries from interfering with C3 CPU state.
6278 	 */
6279 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6280 
6281 	if (!sc->sc_attached)
6282 		return 0;
6283 
6284 	iwm_init_task(device_get_softc(dev));
6285 
6286 	IWM_LOCK(sc);
6287 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6288 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6289 		do_reinit = 1;
6290 	}
6291 	IWM_UNLOCK(sc);
6292 
6293 	if (do_reinit)
6294 		ieee80211_resume_all(&sc->sc_ic);
6295 
6296 	return 0;
6297 }
6298 
6299 static int
6300 iwm_suspend(device_t dev)
6301 {
6302 	int do_stop = 0;
6303 	struct iwm_softc *sc = device_get_softc(dev);
6304 
6305 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6306 
6307 	if (!sc->sc_attached)
6308 		return (0);
6309 
6310 	ieee80211_suspend_all(&sc->sc_ic);
6311 
6312 	if (do_stop) {
6313 		IWM_LOCK(sc);
6314 		iwm_stop(sc);
6315 		sc->sc_flags |= IWM_FLAG_SCANNING;
6316 		IWM_UNLOCK(sc);
6317 	}
6318 
6319 	return (0);
6320 }
6321 
6322 static int
6323 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6324 {
6325 	struct iwm_fw_info *fw = &sc->sc_fw;
6326 	device_t dev = sc->sc_dev;
6327 	int i;
6328 
6329 	if (!sc->sc_attached)
6330 		return 0;
6331 	sc->sc_attached = 0;
6332 	if (do_net80211) {
6333 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6334 	}
6335 	iwm_stop_device(sc);
6336 	if (do_net80211) {
6337 		IWM_LOCK(sc);
6338 		iwm_xmit_queue_drain(sc);
6339 		IWM_UNLOCK(sc);
6340 		ieee80211_ifdetach(&sc->sc_ic);
6341 	}
6342 	callout_drain(&sc->sc_led_blink_to);
6343 	callout_drain(&sc->sc_watchdog_to);
6344 
6345 	iwm_phy_db_free(sc->sc_phy_db);
6346 	sc->sc_phy_db = NULL;
6347 
6348 	iwm_free_nvm_data(sc->nvm_data);
6349 
6350 	/* Free descriptor rings */
6351 	iwm_free_rx_ring(sc, &sc->rxq);
6352 	for (i = 0; i < nitems(sc->txq); i++)
6353 		iwm_free_tx_ring(sc, &sc->txq[i]);
6354 
6355 	/* Free firmware */
6356 	if (fw->fw_fp != NULL)
6357 		iwm_fw_info_free(fw);
6358 
6359 	/* Free scheduler */
6360 	iwm_dma_contig_free(&sc->sched_dma);
6361 	iwm_dma_contig_free(&sc->ict_dma);
6362 	iwm_dma_contig_free(&sc->kw_dma);
6363 	iwm_dma_contig_free(&sc->fw_dma);
6364 
6365 	iwm_free_fw_paging(sc);
6366 
6367 	/* Finished with the hardware - detach things */
6368 	iwm_pci_detach(dev);
6369 
6370 	if (sc->sc_notif_wait != NULL) {
6371 		iwm_notification_wait_free(sc->sc_notif_wait);
6372 		sc->sc_notif_wait = NULL;
6373 	}
6374 
6375 	IWM_LOCK_DESTROY(sc);
6376 
6377 	return (0);
6378 }
6379 
6380 static int
6381 iwm_detach(device_t dev)
6382 {
6383 	struct iwm_softc *sc = device_get_softc(dev);
6384 
6385 	return (iwm_detach_local(sc, 1));
6386 }
6387 
6388 static device_method_t iwm_pci_methods[] = {
6389         /* Device interface */
6390         DEVMETHOD(device_probe,         iwm_probe),
6391         DEVMETHOD(device_attach,        iwm_attach),
6392         DEVMETHOD(device_detach,        iwm_detach),
6393         DEVMETHOD(device_suspend,       iwm_suspend),
6394         DEVMETHOD(device_resume,        iwm_resume),
6395 
6396         DEVMETHOD_END
6397 };
6398 
6399 static driver_t iwm_pci_driver = {
6400         "iwm",
6401         iwm_pci_methods,
6402         sizeof (struct iwm_softc)
6403 };
6404 
6405 static devclass_t iwm_devclass;
6406 
6407 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6408 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6409     iwm_devices, nitems(iwm_devices));
6410 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6411 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6412 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6413