/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
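/* Like mtod(9), but returns a pointer of type "t" at byte offset "off" into the mbuf data. */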
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF
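/* Mask applied to the NVM's hardware-address-count word; see iwm_get_n_hw_addrs(). */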

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
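/* The "rate" field is in net80211's 500 kbit/s units: 2 is 1 Mbit/s, 108 is 54 Mbit/s. */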
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

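/* Firmware handshake timeouts, in ticks: one second for "alive", two for calibration. */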
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_sects *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_sects *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_sects *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_sects *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
				    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40
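/* Default until the firmware's IWM_UCODE_TLV_N_SCAN_CHANNELS TLV overrides it. */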

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_INIT)
		return 0;

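	/* Only one thread loads/parses firmware at a time; wait for a concurrent loader to finish. */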
	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->cfg->fw_name);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		error = ENOENT;	/* fail the load rather than reporting success */
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

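	/*
	 * The remainder of the image is a stream of TLV records:
	 * a 32-bit type, a 32-bit length, and "length" payload bytes,
	 * each record padded out to a 4-byte boundary.
	 */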
	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
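	/* Each descriptor is one 32-bit word: the DMA address of its receive buffer, right-shifted by 8 bits. */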
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
	 * to allocate command space for the other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
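	/* The high byte selects the queue; the low byte resets its write pointer to slot 0. */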

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
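	/* Number of 32-bit words in the scheduler's context/translation area, zeroed below. */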

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
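	/* FIFO 7 is the firmware command FIFO (IWM_MVM_TX_FIFO_CMD). */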
1656 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1657 	if (error)
1658 		return error;
1659 
1660 	if (!iwm_nic_lock(sc))
1661 		return EBUSY;
1662 
1663 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1664 
1665 	/* Enable DMA channels. */
1666 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1667 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1668 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1669 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1670 	}
1671 
1672 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1673 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1674 
1675 	iwm_nic_unlock(sc);
1676 
1677 	/* Enable L1-Active */
1678 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1679 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1680 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1681 	}
1682 
1683 	return error;
1684 }
1685 
1686 /*
1687  * NVM read access and content parsing.  We do not support
1688  * external NVM or writing NVM.
1689  * iwlwifi/mvm/nvm.c
1690  */
1691 
1692 /* Default NVM size to read */
1693 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1694 
1695 #define IWM_NVM_WRITE_OPCODE 1
1696 #define IWM_NVM_READ_OPCODE 0
1697 
1698 /* load nvm chunk response */
1699 enum {
1700 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1701 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1702 };
1703 
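/*
 * Read a chunk of at most 'length' bytes from NVM 'section' starting at
 * 'offset' using the IWM_NVM_ACCESS_CMD firmware command.  The chunk is
 * copied into 'data' at 'offset' and the number of bytes the firmware
 * actually returned is stored in '*len'.
 */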
1704 static int
1705 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1706 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1707 {
1708 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1709 		.offset = htole16(offset),
1710 		.length = htole16(length),
1711 		.type = htole16(section),
1712 		.op_code = IWM_NVM_READ_OPCODE,
1713 	};
1714 	struct iwm_nvm_access_resp *nvm_resp;
1715 	struct iwm_rx_packet *pkt;
1716 	struct iwm_host_cmd cmd = {
1717 		.id = IWM_NVM_ACCESS_CMD,
1718 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1719 		.data = { &nvm_access_cmd, },
1720 	};
1721 	int ret, bytes_read, offset_read;
1722 	uint8_t *resp_data;
1723 
1724 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1725 
1726 	ret = iwm_send_cmd(sc, &cmd);
1727 	if (ret) {
1728 		device_printf(sc->sc_dev,
1729 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1730 		return ret;
1731 	}
1732 
1733 	pkt = cmd.resp_pkt;
1734 
1735 	/* Extract NVM response */
1736 	nvm_resp = (void *)pkt->data;
1737 	ret = le16toh(nvm_resp->status);
1738 	bytes_read = le16toh(nvm_resp->length);
1739 	offset_read = le16toh(nvm_resp->offset);
1740 	resp_data = nvm_resp->data;
1741 	if (ret) {
1742 		if ((offset != 0) &&
1743 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver has already
			 * read valid data from another chunk, so this case
			 * is not an error.
			 */
1752 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1754 				    offset);
1755 			*len = 0;
1756 			ret = 0;
1757 		} else {
1758 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1759 				    "NVM access command failed with status %d\n", ret);
1760 			ret = EIO;
1761 		}
1762 		goto exit;
1763 	}
1764 
1765 	if (offset_read != offset) {
1766 		device_printf(sc->sc_dev,
1767 		    "NVM ACCESS response with invalid offset %d\n",
1768 		    offset_read);
1769 		ret = EINVAL;
1770 		goto exit;
1771 	}
1772 
1773 	if (bytes_read > length) {
1774 		device_printf(sc->sc_dev,
1775 		    "NVM ACCESS response with too much data "
1776 		    "(%d bytes requested, %d bytes received)\n",
1777 		    length, bytes_read);
1778 		ret = EINVAL;
1779 		goto exit;
1780 	}
1781 
	/* Copy the returned NVM chunk into the caller's buffer. */
1783 	memcpy(data + offset, resp_data, bytes_read);
1784 	*len = bytes_read;
1785 
1786  exit:
1787 	iwm_free_resp(sc, &cmd);
1788 	return ret;
1789 }
1790 
/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM.  Because the uCode does not bound
 * EEPROM reads, we must check manually in that case that we don't
 * try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as fits, without
 * overflowing, so no check is needed.
 */
1801 static int
1802 iwm_nvm_read_section(struct iwm_softc *sc,
1803 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1804 {
1805 	uint16_t seglen, length, offset = 0;
1806 	int ret;
1807 
1808 	/* Set nvm section read length */
1809 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1810 
1811 	seglen = length;
1812 
1813 	/* Read the NVM until exhausted (reading less than requested) */
1814 	while (seglen == length) {
		/* Make sure the read cannot overflow the destination buffer. */
1816 		if ((size_read + offset + length) >
1817 		    sc->cfg->eeprom_size) {
1818 			device_printf(sc->sc_dev,
1819 			    "EEPROM size is too small for NVM\n");
1820 			return ENOBUFS;
1821 		}
1822 
1823 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1824 		if (ret) {
1825 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1826 				    "Cannot read NVM from section %d offset %d, length %d\n",
1827 				    section, offset, length);
1828 			return ret;
1829 		}
1830 		offset += seglen;
1831 	}
1832 
1833 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1834 		    "NVM section %d read completed\n", section);
1835 	*len = offset;
1836 	return 0;
1837 }
1838 
1839 /*
1840  * BEGIN IWM_NVM_PARSE
1841  */
1842 
1843 /* iwlwifi/iwl-nvm-parse.c */
1844 
1845 /* NVM offsets (in words) definitions */
1846 enum iwm_nvm_offsets {
1847 	/* NVM HW-Section offset (in words) definitions */
1848 	IWM_HW_ADDR = 0x15,
1849 
1850 /* NVM SW-Section offset (in words) definitions */
1851 	IWM_NVM_SW_SECTION = 0x1C0,
1852 	IWM_NVM_VERSION = 0,
1853 	IWM_RADIO_CFG = 1,
1854 	IWM_SKU = 2,
1855 	IWM_N_HW_ADDRS = 3,
1856 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1857 
1858 /* NVM calibration section offset (in words) definitions */
1859 	IWM_NVM_CALIB_SECTION = 0x2B8,
1860 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1861 };
1862 
1863 enum iwm_8000_nvm_offsets {
1864 	/* NVM HW-Section offset (in words) definitions */
1865 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1866 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1867 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1868 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1869 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1870 
1871 	/* NVM SW-Section offset (in words) definitions */
1872 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1873 	IWM_NVM_VERSION_8000 = 0,
1874 	IWM_RADIO_CFG_8000 = 0,
1875 	IWM_SKU_8000 = 2,
1876 	IWM_N_HW_ADDRS_8000 = 3,
1877 
1878 	/* NVM REGULATORY -Section offset (in words) definitions */
1879 	IWM_NVM_CHANNELS_8000 = 0,
1880 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1881 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1882 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1883 
1884 	/* NVM calibration section offset (in words) definitions */
1885 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1886 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1887 };
1888 
1889 /* SKU Capabilities (actual values from NVM definition) */
1890 enum nvm_sku_bits {
1891 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1892 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1893 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1894 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1895 };
1896 
1897 /* radio config bits (actual values from NVM definition) */
1898 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1899 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1900 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1901 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1902 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1903 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1904 
1905 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1906 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1907 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1908 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1909 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1910 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1911 
1912 #define DEFAULT_MAX_TX_POWER 16
1913 
1914 /**
1915  * enum iwm_nvm_channel_flags - channel flags in NVM
1916  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1917  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1918  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1919  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1920  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1921  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1922  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1923  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1924  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1925  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1926  */
1927 enum iwm_nvm_channel_flags {
1928 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1929 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1930 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1931 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1932 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1933 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1934 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1935 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1936 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1937 };
1938 
1939 /*
1940  * Translate EEPROM flags to net80211.
1941  */
1942 static uint32_t
1943 iwm_eeprom_channel_flags(uint16_t ch_flags)
1944 {
1945 	uint32_t nflags;
1946 
1947 	nflags = 0;
1948 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1949 		nflags |= IEEE80211_CHAN_PASSIVE;
1950 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1951 		nflags |= IEEE80211_CHAN_NOADHOC;
1952 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1953 		nflags |= IEEE80211_CHAN_DFS;
1954 		/* Just in case. */
1955 		nflags |= IEEE80211_CHAN_NOADHOC;
1956 	}
1957 
1958 	return (nflags);
1959 }
1960 
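/*
 * Add the channels in [ch_idx, ch_num) from the NVM channel list to the
 * net80211 channel array, skipping channels that are not marked valid
 * and translating the NVM flags via iwm_eeprom_channel_flags().
 */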
1961 static void
1962 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1963     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1964     const uint8_t bands[])
1965 {
1966 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1967 	uint32_t nflags;
1968 	uint16_t ch_flags;
1969 	uint8_t ieee;
1970 	int error;
1971 
1972 	for (; ch_idx < ch_num; ch_idx++) {
1973 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1974 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1975 			ieee = iwm_nvm_channels[ch_idx];
1976 		else
1977 			ieee = iwm_nvm_channels_8000[ch_idx];
1978 
1979 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1980 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1981 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1982 			    ieee, ch_flags,
1983 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1984 			    "5.2" : "2.4");
1985 			continue;
1986 		}
1987 
1988 		nflags = iwm_eeprom_channel_flags(ch_flags);
1989 		error = ieee80211_add_channel(chans, maxchans, nchans,
1990 		    ieee, 0, 0, nflags, bands);
1991 		if (error != 0)
1992 			break;
1993 
1994 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1995 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1996 		    ieee, ch_flags,
1997 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1998 		    "5.2" : "2.4");
1999 	}
2000 }
2001 
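/*
 * net80211 callback which populates the device channel list: the 2 GHz
 * 11b/g channels, channel 14 as 11b-only, and the 5 GHz 11a channels
 * when the SKU enables the 5.2 GHz band.
 */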
2002 static void
2003 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2004     struct ieee80211_channel chans[])
2005 {
2006 	struct iwm_softc *sc = ic->ic_softc;
2007 	struct iwm_nvm_data *data = sc->nvm_data;
2008 	uint8_t bands[IEEE80211_MODE_BYTES];
2009 	size_t ch_num;
2010 
2011 	memset(bands, 0, sizeof(bands));
2012 	/* 1-13: 11b/g channels. */
2013 	setbit(bands, IEEE80211_MODE_11B);
2014 	setbit(bands, IEEE80211_MODE_11G);
2015 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2016 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2017 
2018 	/* 14: 11b channel only. */
2019 	clrbit(bands, IEEE80211_MODE_11G);
2020 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2021 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2022 
2023 	if (data->sku_cap_band_52GHz_enable) {
2024 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2025 			ch_num = nitems(iwm_nvm_channels);
2026 		else
2027 			ch_num = nitems(iwm_nvm_channels_8000);
2028 		memset(bands, 0, sizeof(bands));
2029 		setbit(bands, IEEE80211_MODE_11A);
2030 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2031 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2032 	}
2033 }
2034 
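/*
 * Derive the MAC address on family 8000 devices: prefer the NVM
 * MAC-override (MAO) section, and fall back to the OTP address held in
 * the WFMP registers when the override is reserved, invalid or absent.
 */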
2035 static void
2036 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2037 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2038 {
2039 	const uint8_t *hw_addr;
2040 
2041 	if (mac_override) {
2042 		static const uint8_t reserved_mac[] = {
2043 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2044 		};
2045 
2046 		hw_addr = (const uint8_t *)(mac_override +
2047 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2048 
		/*
		 * Store the MAC address from the MAO section.
		 * No byte swapping is required in the MAO section.
		 */
2053 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2054 
2055 		/*
2056 		 * Force the use of the OTP MAC address in case of reserved MAC
2057 		 * address in the NVM, or if address is given but invalid.
2058 		 */
2059 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2060 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2061 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2062 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2063 			return;
2064 
2065 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2066 		    "%s: mac address from nvm override section invalid\n",
2067 		    __func__);
2068 	}
2069 
2070 	if (nvm_hw) {
2071 		/* read the mac address from WFMP registers */
2072 		uint32_t mac_addr0 =
2073 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2074 		uint32_t mac_addr1 =
2075 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2076 
2077 		hw_addr = (const uint8_t *)&mac_addr0;
2078 		data->hw_addr[0] = hw_addr[3];
2079 		data->hw_addr[1] = hw_addr[2];
2080 		data->hw_addr[2] = hw_addr[1];
2081 		data->hw_addr[3] = hw_addr[0];
2082 
2083 		hw_addr = (const uint8_t *)&mac_addr1;
2084 		data->hw_addr[4] = hw_addr[1];
2085 		data->hw_addr[5] = hw_addr[0];
2086 
2087 		return;
2088 	}
2089 
2090 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2091 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2092 }
2093 
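/*
 * NVM field accessors.  Pre-8000 devices store these fields as 16-bit
 * words in the SW section; family 8000 devices store them as 32-bit
 * words, some of them in the PHY_SKU section.
 */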
2094 static int
2095 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2096 	    const uint16_t *phy_sku)
2097 {
2098 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2099 		return le16_to_cpup(nvm_sw + IWM_SKU);
2100 
2101 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2102 }
2103 
2104 static int
2105 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2106 {
2107 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2108 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2109 	else
2110 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2111 						IWM_NVM_VERSION_8000));
2112 }
2113 
2114 static int
2115 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2116 		  const uint16_t *phy_sku)
2117 {
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
}
2123 
2124 static int
2125 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2126 {
2127 	int n_hw_addr;
2128 
2129 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2130 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2131 
	n_hw_addr = le32_to_cpup((const uint32_t *)
	    (nvm_sw + IWM_N_HW_ADDRS_8000));

	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2135 }
2136 
2137 static void
2138 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2139 		  uint32_t radio_cfg)
2140 {
2141 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2142 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2143 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2144 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2145 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2146 		return;
2147 	}
2148 
2149 	/* set the radio configuration for family 8000 */
2150 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2151 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2152 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2153 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2154 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2155 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2156 }
2157 
2158 static int
2159 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2160 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2161 {
2162 #ifdef notyet /* for FAMILY 9000 */
2163 	if (cfg->mac_addr_from_csr) {
2164 		iwm_set_hw_address_from_csr(sc, data);
	} else
2166 #endif
2167 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2168 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2169 
2170 		/* The byte order is little endian 16 bit, meaning 214365 */
2171 		data->hw_addr[0] = hw_addr[1];
2172 		data->hw_addr[1] = hw_addr[0];
2173 		data->hw_addr[2] = hw_addr[3];
2174 		data->hw_addr[3] = hw_addr[2];
2175 		data->hw_addr[4] = hw_addr[5];
2176 		data->hw_addr[5] = hw_addr[4];
2177 	} else {
2178 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2179 	}
2180 
2181 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2182 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2183 		return EINVAL;
2184 	}
2185 
2186 	return 0;
2187 }
2188 
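/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data
 * structure: radio configuration, SKU capabilities, the hardware
 * address and the per-channel flags used to build the channel map.
 */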
2189 static struct iwm_nvm_data *
2190 iwm_parse_nvm_data(struct iwm_softc *sc,
2191 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2192 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2193 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2194 {
2195 	struct iwm_nvm_data *data;
2196 	uint32_t sku, radio_cfg;
2197 	uint16_t lar_config;
2198 
2199 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2200 		data = malloc(sizeof(*data) +
2201 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2202 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2203 	} else {
2204 		data = malloc(sizeof(*data) +
2205 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2206 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2207 	}
2208 	if (!data)
2209 		return NULL;
2210 
2211 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2212 
2213 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2214 	iwm_set_radio_cfg(sc, data, radio_cfg);
2215 
2216 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2217 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2218 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2219 	data->sku_cap_11n_enable = 0;
2220 
2221 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2222 
2223 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2224 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2225 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2226 				       IWM_NVM_LAR_OFFSET_8000;
2227 
2228 		lar_config = le16_to_cpup(regulatory + lar_offset);
2229 		data->lar_enabled = !!(lar_config &
2230 				       IWM_NVM_LAR_ENABLED_8000);
2231 	}
2232 
2233 	/* If no valid mac address was found - bail out */
2234 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2235 		free(data, M_DEVBUF);
2236 		return NULL;
2237 	}
2238 
2239 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2240 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2241 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2242 	} else {
2243 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2244 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2245 	}
2246 
2247 	return data;
2248 }
2249 
2250 static void
2251 iwm_free_nvm_data(struct iwm_nvm_data *data)
2252 {
2253 	if (data != NULL)
2254 		free(data, M_DEVBUF);
2255 }
2256 
2257 static struct iwm_nvm_data *
2258 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2259 {
2260 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2261 
2262 	/* Checking for required sections */
2263 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2264 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2265 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2266 			device_printf(sc->sc_dev,
2267 			    "Can't parse empty OTP/NVM sections\n");
2268 			return NULL;
2269 		}
2270 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2271 		/* SW and REGULATORY sections are mandatory */
2272 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2273 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2274 			device_printf(sc->sc_dev,
2275 			    "Can't parse empty OTP/NVM sections\n");
2276 			return NULL;
2277 		}
2278 		/* MAC_OVERRIDE or at least HW section must exist */
2279 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2280 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2281 			device_printf(sc->sc_dev,
2282 			    "Can't parse mac_address, empty sections\n");
2283 			return NULL;
2284 		}
2285 
2286 		/* PHY_SKU section is mandatory in B0 */
2287 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2288 			device_printf(sc->sc_dev,
2289 			    "Can't parse phy_sku in B0, empty sections\n");
2290 			return NULL;
2291 		}
2292 	} else {
		panic("unknown device family %d", sc->cfg->device_family);
2294 	}
2295 
	hw = (const uint16_t *)sections[sc->cfg->nvm_hw_section_num].data;
2297 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2298 	calib = (const uint16_t *)
2299 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2300 	regulatory = (const uint16_t *)
2301 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2302 	mac_override = (const uint16_t *)
2303 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2304 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2305 
2306 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2307 	    phy_sku, regulatory);
2308 }
2309 
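/*
 * Read all NVM sections from the device into temporary buffers and
 * parse them into sc->nvm_data; the section buffers are freed once
 * parsing is complete.
 */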
2310 static int
2311 iwm_nvm_init(struct iwm_softc *sc)
2312 {
2313 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2314 	int i, ret, section;
2315 	uint32_t size_read = 0;
2316 	uint8_t *nvm_buffer, *temp;
2317 	uint16_t len;
2318 
2319 	memset(nvm_sections, 0, sizeof(nvm_sections));
2320 
2321 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2322 		return EINVAL;
2323 
	/* Load the NVM sections from the NIC. */
2326 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2327 
2328 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2329 	if (!nvm_buffer)
2330 		return ENOMEM;
2331 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2332 		/* we override the constness for initial read */
2333 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2334 					   &len, size_read);
2335 		if (ret)
2336 			continue;
2337 		size_read += len;
2338 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2339 		if (!temp) {
2340 			ret = ENOMEM;
2341 			break;
2342 		}
2343 		memcpy(temp, nvm_buffer, len);
2344 
2345 		nvm_sections[section].data = temp;
2346 		nvm_sections[section].length = len;
2347 	}
2348 	if (!size_read)
2349 		device_printf(sc->sc_dev, "OTP is blank\n");
2350 	free(nvm_buffer, M_DEVBUF);
2351 
2352 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2353 	if (!sc->nvm_data)
2354 		return EINVAL;
2355 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2356 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2357 
2358 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2359 		if (nvm_sections[i].data != NULL)
2360 			free(nvm_sections[i].data, M_DEVBUF);
2361 	}
2362 
2363 	return 0;
2364 }
2365 
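/*
 * Copy one firmware section into device SRAM, bouncing it through the
 * fw_dma buffer in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.
 * Destinations in the extended SRAM range need the extended address
 * space bit toggled around the transfer.
 */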
2366 static int
2367 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2368 	const struct iwm_fw_desc *section)
2369 {
2370 	struct iwm_dma_info *dma = &sc->fw_dma;
2371 	uint8_t *v_addr;
2372 	bus_addr_t p_addr;
2373 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2374 	int ret = 0;
2375 
2376 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2377 		    "%s: [%d] uCode section being loaded...\n",
2378 		    __func__, section_num);
2379 
2380 	v_addr = dma->vaddr;
2381 	p_addr = dma->paddr;
2382 
2383 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2384 		uint32_t copy_size, dst_addr;
2385 		int extended_addr = FALSE;
2386 
2387 		copy_size = MIN(chunk_sz, section->len - offset);
2388 		dst_addr = section->offset + offset;
2389 
2390 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2391 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2392 			extended_addr = TRUE;
2393 
2394 		if (extended_addr)
2395 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2396 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2397 
2398 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2399 		    copy_size);
2400 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2401 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2402 						   copy_size);
2403 
2404 		if (extended_addr)
2405 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2406 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2407 
2408 		if (ret) {
2409 			device_printf(sc->sc_dev,
2410 			    "%s: Could not load the [%d] uCode section\n",
2411 			    __func__, section_num);
2412 			break;
2413 		}
2414 	}
2415 
2416 	return ret;
2417 }
2418 
2419 /*
2420  * ucode
2421  */
2422 static int
2423 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2424 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2425 {
2426 	int ret;
2427 
2428 	sc->sc_fw_chunk_done = 0;
2429 
2430 	if (!iwm_nic_lock(sc))
2431 		return EBUSY;
2432 
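	/*
	 * Program the service channel: pause it, set the SRAM destination,
	 * the DMA source address and byte count, mark the buffer valid and
	 * finally re-enable the channel to kick off the transfer.
	 */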
2433 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2434 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2435 
2436 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2437 	    dst_addr);
2438 
2439 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2440 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2441 
2442 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2443 	    (iwm_get_dma_hi_addr(phy_addr)
2444 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2445 
2446 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2447 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2448 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2449 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2450 
2451 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2452 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2453 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2454 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2455 
2456 	iwm_nic_unlock(sc);
2457 
2458 	/* wait up to 5s for this segment to load */
2459 	ret = 0;
2460 	while (!sc->sc_fw_chunk_done) {
2461 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2462 		if (ret)
2463 			break;
2464 	}
2465 
2466 	if (ret != 0) {
2467 		device_printf(sc->sc_dev,
2468 		    "fw chunk addr 0x%x len %d failed to load\n",
2469 		    dst_addr, byte_cnt);
2470 		return ETIMEDOUT;
2471 	}
2472 
2473 	return 0;
2474 }
2475 
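/*
 * Load the firmware sections for one CPU on family 8000 devices,
 * reporting each loaded section to the uCode through the
 * IWM_FH_UCODE_LOAD_STATUS register.
 */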
2476 static int
2477 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2478 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2479 {
2480 	int shift_param;
2481 	int i, ret = 0, sec_num = 0x1;
2482 	uint32_t val, last_read_idx = 0;
2483 
2484 	if (cpu == 1) {
2485 		shift_param = 0;
2486 		*first_ucode_section = 0;
2487 	} else {
2488 		shift_param = 16;
2489 		(*first_ucode_section)++;
2490 	}
2491 
2492 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2493 		last_read_idx = i;
2494 
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
2501 		if (!image->fw_sect[i].data ||
2502 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2503 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2504 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2505 				    "Break since Data not valid or Empty section, sec = %d\n",
2506 				    i);
2507 			break;
2508 		}
2509 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2510 		if (ret)
2511 			return ret;
2512 
2513 		/* Notify the ucode of the loaded section number and status */
2514 		if (iwm_nic_lock(sc)) {
2515 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2516 			val = val | (sec_num << shift_param);
2517 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2518 			sec_num = (sec_num << 1) | 0x1;
2519 			iwm_nic_unlock(sc);
2520 		}
2521 	}
2522 
2523 	*first_ucode_section = last_read_idx;
2524 
2525 	iwm_enable_interrupts(sc);
2526 
2527 	if (iwm_nic_lock(sc)) {
2528 		if (cpu == 1)
2529 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2530 		else
2531 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2532 		iwm_nic_unlock(sc);
2533 	}
2534 
2535 	return 0;
2536 }
2537 
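/*
 * Load the firmware sections for one CPU on pre-8000 devices; unlike
 * the 8000 path, no per-section load status is reported to the uCode.
 */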
2538 static int
2539 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2540 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2541 {
2542 	int shift_param;
2543 	int i, ret = 0;
2544 	uint32_t last_read_idx = 0;
2545 
2546 	if (cpu == 1) {
2547 		shift_param = 0;
2548 		*first_ucode_section = 0;
2549 	} else {
2550 		shift_param = 16;
2551 		(*first_ucode_section)++;
2552 	}
2553 
2554 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2555 		last_read_idx = i;
2556 
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
2563 		if (!image->fw_sect[i].data ||
2564 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2565 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2566 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2567 				    "Break since Data not valid or Empty section, sec = %d\n",
2568 				     i);
2569 			break;
2570 		}
2571 
2572 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2573 		if (ret)
2574 			return ret;
2575 	}
2576 
	*first_ucode_section = last_read_idx;

	return 0;
}
2582 
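/*
 * Load a complete firmware image: CPU1 sections first, then CPU2 when
 * the image is dual-CPU, and finally release the CPU reset.
 */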
2583 static int
2584 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2585 	const struct iwm_fw_sects *image)
2586 {
2587 	int ret = 0;
2588 	int first_ucode_section;
2589 
2590 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2591 		     image->is_dual_cpus ? "Dual" : "Single");
2592 
2593 	/* load to FW the binary non secured sections of CPU1 */
2594 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2595 	if (ret)
2596 		return ret;
2597 
2598 	if (image->is_dual_cpus) {
2599 		/* set CPU2 header address */
2600 		if (iwm_nic_lock(sc)) {
2601 			iwm_write_prph(sc,
2602 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2603 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2604 			iwm_nic_unlock(sc);
2605 		}
2606 
2607 		/* load to FW the binary sections of CPU2 */
2608 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2609 						 &first_ucode_section);
2610 		if (ret)
2611 			return ret;
2612 	}
2613 
2614 	iwm_enable_interrupts(sc);
2615 
2616 	/* release CPU reset */
2617 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2618 
2619 	return 0;
2620 }
2621 
2622 int
2623 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2624 	const struct iwm_fw_sects *image)
2625 {
2626 	int ret = 0;
2627 	int first_ucode_section;
2628 
2629 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2630 		    image->is_dual_cpus ? "Dual" : "Single");
2631 
2632 	/* configure the ucode to be ready to get the secured image */
2633 	/* release CPU reset */
2634 	if (iwm_nic_lock(sc)) {
2635 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2636 		    IWM_RELEASE_CPU_RESET_BIT);
2637 		iwm_nic_unlock(sc);
2638 	}
2639 
2640 	/* load to FW the binary Secured sections of CPU1 */
2641 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2642 	    &first_ucode_section);
2643 	if (ret)
2644 		return ret;
2645 
2646 	/* load to FW the binary sections of CPU2 */
2647 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2648 	    &first_ucode_section);
2649 }
2650 
2651 /* XXX Get rid of this definition */
2652 static inline void
2653 iwm_enable_fw_load_int(struct iwm_softc *sc)
2654 {
2655 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2656 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2657 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2658 }
2659 
2660 /* XXX Add proper rfkill support code */
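/*
 * Prepare the hardware, clear the rfkill handshake bits, initialize
 * the NIC and load the given firmware image onto the device.
 */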
2661 static int
2662 iwm_start_fw(struct iwm_softc *sc,
2663 	const struct iwm_fw_sects *fw)
2664 {
2665 	int ret;
2666 
2667 	/* This may fail if AMT took ownership of the device */
2668 	if (iwm_prepare_card_hw(sc)) {
2669 		device_printf(sc->sc_dev,
2670 		    "%s: Exit HW not ready\n", __func__);
2671 		ret = EIO;
2672 		goto out;
2673 	}
2674 
2675 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2676 
2677 	iwm_disable_interrupts(sc);
2678 
2679 	/* make sure rfkill handshake bits are cleared */
2680 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2681 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2682 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2683 
2684 	/* clear (again), then enable host interrupts */
2685 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2686 
2687 	ret = iwm_nic_init(sc);
2688 	if (ret) {
2689 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2690 		goto out;
2691 	}
2692 
2693 	/*
2694 	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
2696 	 * FH_TX interrupt which is needed to load the firmware). If the
2697 	 * RF-Kill switch is toggled, we will find out after having loaded
2698 	 * the firmware and return the proper value to the caller.
2699 	 */
2700 	iwm_enable_fw_load_int(sc);
2701 
2702 	/* really make sure rfkill handshake bits are cleared */
2703 	/* maybe we should write a few times more?  just to make sure */
2704 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2705 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2706 
2707 	/* Load the given image to the HW */
2708 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2709 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2710 	else
2711 		ret = iwm_pcie_load_given_ucode(sc, fw);
2712 
2713 	/* XXX re-check RF-Kill state */
2714 
2715 out:
2716 	return ret;
2717 }
2718 
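/* Tell the firmware which TX antennas are usable. */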
2719 static int
2720 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2721 {
2722 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2723 		.valid = htole32(valid_tx_ant),
2724 	};
2725 
2726 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2727 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2728 }
2729 
2730 /* iwlwifi: mvm/fw.c */
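/*
 * Send the PHY configuration and the calibration triggers for the
 * current uCode type to the firmware.
 */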
2731 static int
2732 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2733 {
2734 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2735 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2736 
2737 	/* Set parameters */
2738 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2739 	phy_cfg_cmd.calib_control.event_trigger =
2740 	    sc->sc_default_calib[ucode_type].event_trigger;
2741 	phy_cfg_cmd.calib_control.flow_trigger =
2742 	    sc->sc_default_calib[ucode_type].flow_trigger;
2743 
2744 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2745 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2746 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2747 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2748 }
2749 
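/*
 * Notification-wait callback for the ALIVE response.  Three response
 * layouts exist, distinguished by payload length; each carries the
 * error/log event table pointers and the scheduler SRAM base address.
 */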
2750 static int
2751 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2752 {
2753 	struct iwm_mvm_alive_data *alive_data = data;
2754 	struct iwm_mvm_alive_resp_ver1 *palive1;
2755 	struct iwm_mvm_alive_resp_ver2 *palive2;
2756 	struct iwm_mvm_alive_resp *palive;
2757 
2758 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2759 		palive1 = (void *)pkt->data;
2760 
2761 		sc->support_umac_log = FALSE;
		sc->error_event_table =
		    le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
		    le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
		    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive1->status), palive1->ver_type,
			    palive1->ver_subtype, palive1->flags);
2774 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2775 		palive2 = (void *)pkt->data;
2776 		sc->error_event_table =
2777 			le32toh(palive2->error_event_table_ptr);
2778 		sc->log_event_table =
2779 			le32toh(palive2->log_event_table_ptr);
2780 		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2781 		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);
2783 
2784 		alive_data->valid = le16toh(palive2->status) ==
2785 				    IWM_ALIVE_STATUS_OK;
2786 		if (sc->umac_error_event_table)
2787 			sc->support_umac_log = TRUE;
2788 
2789 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2790 			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2791 			    le16toh(palive2->status), palive2->ver_type,
2792 			    palive2->ver_subtype, palive2->flags);
2793 
2794 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2795 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2796 			    palive2->umac_major, palive2->umac_minor);
2797 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2798 		palive = (void *)pkt->data;
2799 
2800 		sc->error_event_table =
2801 			le32toh(palive->error_event_table_ptr);
2802 		sc->log_event_table =
2803 			le32toh(palive->log_event_table_ptr);
2804 		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2805 		sc->umac_error_event_table =
2806 			le32toh(palive->error_info_addr);
2807 
2808 		alive_data->valid = le16toh(palive->status) ==
2809 				    IWM_ALIVE_STATUS_OK;
2810 		if (sc->umac_error_event_table)
2811 			sc->support_umac_log = TRUE;
2812 
2813 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2814 			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2815 			    le16toh(palive->status), palive->ver_type,
2816 			    palive->ver_subtype, palive->flags);
2817 
2818 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2819 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2820 			    le32toh(palive->umac_major),
2821 			    le32toh(palive->umac_minor));
2822 	}
2823 
2824 	return TRUE;
2825 }
2826 
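/*
 * Notification-wait callback: store calibration results from the INIT
 * firmware in the PHY database; returns TRUE once a non-PHY-DB
 * notification (e.g. INIT_COMPLETE) arrives.
 */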
2827 static int
2828 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2829 	struct iwm_rx_packet *pkt, void *data)
2830 {
2831 	struct iwm_phy_db *phy_db = data;
2832 
2833 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2834 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2835 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2836 			    __func__, pkt->hdr.code);
2837 		}
2838 		return TRUE;
2839 	}
2840 
2841 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2842 		device_printf(sc->sc_dev,
2843 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2844 	}
2845 
2846 	return FALSE;
2847 }
2848 
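/*
 * Load the requested uCode image, start the firmware and block until
 * the ALIVE notification arrives; then bring up the TX scheduler and,
 * where the image provides one, the firmware paging mechanism.
 */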
2849 static int
2850 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2851 	enum iwm_ucode_type ucode_type)
2852 {
2853 	struct iwm_notification_wait alive_wait;
2854 	struct iwm_mvm_alive_data alive_data;
2855 	const struct iwm_fw_sects *fw;
2856 	enum iwm_ucode_type old_type = sc->cur_ucode;
2857 	int error;
2858 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2859 
2860 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2861 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2862 			error);
2863 		return error;
2864 	}
2865 	fw = &sc->sc_fw.fw_sects[ucode_type];
2866 	sc->cur_ucode = ucode_type;
2867 	sc->ucode_loaded = FALSE;
2868 
2869 	memset(&alive_data, 0, sizeof(alive_data));
2870 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2871 				   alive_cmd, nitems(alive_cmd),
2872 				   iwm_alive_fn, &alive_data);
2873 
2874 	error = iwm_start_fw(sc, fw);
2875 	if (error) {
2876 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2877 		sc->cur_ucode = old_type;
2878 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2879 		return error;
2880 	}
2881 
2882 	/*
2883 	 * Some things may run in the background now, but we
2884 	 * just wait for the ALIVE notification here.
2885 	 */
2886 	IWM_UNLOCK(sc);
2887 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2888 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2889 	IWM_LOCK(sc);
2890 	if (error) {
2891 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2892 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2893 			if (iwm_nic_lock(sc)) {
2894 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2895 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2896 				iwm_nic_unlock(sc);
2897 			}
2898 			device_printf(sc->sc_dev,
2899 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2900 			    a, b);
2901 		}
2902 		sc->cur_ucode = old_type;
2903 		return error;
2904 	}
2905 
2906 	if (!alive_data.valid) {
2907 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2908 		    __func__);
2909 		sc->cur_ucode = old_type;
2910 		return EIO;
2911 	}
2912 
2913 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2914 
	/*
	 * Configure and operate the firmware paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWM_UCODE_INIT image.
	 */
2920 	if (fw->paging_mem_size) {
2921 		error = iwm_save_fw_paging(sc, fw);
2922 		if (error) {
2923 			device_printf(sc->sc_dev,
2924 			    "%s: failed to save the FW paging image\n",
2925 			    __func__);
2926 			return error;
2927 		}
2928 
2929 		error = iwm_send_paging_cmd(sc, fw);
2930 		if (error) {
2931 			device_printf(sc->sc_dev,
2932 			    "%s: failed to send the paging cmd\n", __func__);
2933 			iwm_free_fw_paging(sc);
2934 			return error;
2935 		}
2936 	}
2937 
2938 	if (!error)
2939 		sc->ucode_loaded = TRUE;
2940 	return error;
2941 }
2942 
2943 /*
2944  * mvm misc bits
2945  */
2946 
2947 /*
2948  * follows iwlwifi/fw.c
2949  */
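/*
 * Run the INIT firmware image: load it, wait for ALIVE, then either
 * just read the NVM (justnvm) or kick off the internal calibrations
 * and wait for them to complete.
 */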
2950 static int
2951 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2952 {
2953 	struct iwm_notification_wait calib_wait;
2954 	static const uint16_t init_complete[] = {
2955 		IWM_INIT_COMPLETE_NOTIF,
2956 		IWM_CALIB_RES_NOTIF_PHY_DB
2957 	};
2958 	int ret;
2959 
2960 	/* do not operate with rfkill switch turned on */
2961 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2962 		device_printf(sc->sc_dev,
2963 		    "radio is disabled by hardware switch\n");
2964 		return EPERM;
2965 	}
2966 
2967 	iwm_init_notification_wait(sc->sc_notif_wait,
2968 				   &calib_wait,
2969 				   init_complete,
2970 				   nitems(init_complete),
2971 				   iwm_wait_phy_db_entry,
2972 				   sc->sc_phy_db);
2973 
2974 	/* Will also start the device */
2975 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2976 	if (ret) {
2977 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2978 		    ret);
2979 		goto error;
2980 	}
2981 
2982 	if (justnvm) {
2983 		/* Read nvm */
2984 		ret = iwm_nvm_init(sc);
2985 		if (ret) {
2986 			device_printf(sc->sc_dev, "failed to read nvm\n");
2987 			goto error;
2988 		}
2989 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2990 		goto error;
2991 	}
2992 
2993 	ret = iwm_send_bt_init_conf(sc);
2994 	if (ret) {
2995 		device_printf(sc->sc_dev,
2996 		    "failed to send bt coex configuration: %d\n", ret);
2997 		goto error;
2998 	}
2999 
3000 	/* Send TX valid antennas before triggering calibrations */
3001 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3002 	if (ret) {
3003 		device_printf(sc->sc_dev,
3004 		    "failed to send antennas before calibration: %d\n", ret);
3005 		goto error;
3006 	}
3007 
	/*
	 * Send the PHY configuration command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
3012 	ret = iwm_send_phy_cfg_cmd(sc);
3013 	if (ret) {
3014 		device_printf(sc->sc_dev,
3015 		    "%s: Failed to run INIT calibrations: %d\n",
3016 		    __func__, ret);
3017 		goto error;
3018 	}
3019 
3020 	/*
3021 	 * Nothing to do but wait for the init complete notification
3022 	 * from the firmware.
3023 	 */
3024 	IWM_UNLOCK(sc);
3025 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3026 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);

	goto out;
3031 
3032 error:
3033 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3034 out:
3035 	return ret;
3036 }
3037 
3038 /*
3039  * receive side
3040  */
3041 
3042 /* (re)stock rx ring, called at init-time and at runtime */
3043 static int
3044 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3045 {
3046 	struct iwm_rx_ring *ring = &sc->rxq;
3047 	struct iwm_rx_data *data = &ring->data[idx];
3048 	struct mbuf *m;
3049 	bus_dmamap_t dmamap;
3050 	bus_dma_segment_t seg;
3051 	int nsegs, error;
3052 
3053 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3054 	if (m == NULL)
3055 		return ENOBUFS;
3056 
3057 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3058 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3059 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3060 	if (error != 0) {
3061 		device_printf(sc->sc_dev,
3062 		    "%s: can't map mbuf, error %d\n", __func__, error);
3063 		m_freem(m);
3064 		return error;
3065 	}
3066 
3067 	if (data->m != NULL)
3068 		bus_dmamap_unload(ring->data_dmat, data->map);
3069 
3070 	/* Swap ring->spare_map with data->map */
3071 	dmamap = data->map;
3072 	data->map = ring->spare_map;
3073 	ring->spare_map = dmamap;
3074 
3075 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3076 	data->m = m;
3077 
3078 	/* Update RX descriptor. */
3079 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3080 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3081 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3082 	    BUS_DMASYNC_PREWRITE);
3083 
3084 	return 0;
3085 }
3086 
3087 /* iwlwifi: mvm/rx.c */
/*
 * iwm_mvm_get_signal_strength - use the new RX PHY INFO API.
 * Values are reported by the fw as positive values - they need to be
 * negated to obtain their dBm.  Account for missing antennas by
 * replacing 0 values by -256dBm: practically 0 power and a non-feasible
 * 8 bit value.
 */
3094 static int
3095 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3096 {
3097 	int energy_a, energy_b, energy_c, max_energy;
3098 	uint32_t val;
3099 
3100 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3101 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3102 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3103 	energy_a = energy_a ? -energy_a : -256;
3104 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3105 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3106 	energy_b = energy_b ? -energy_b : -256;
3107 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3108 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3109 	energy_c = energy_c ? -energy_c : -256;
3110 	max_energy = MAX(energy_a, energy_b);
3111 	max_energy = MAX(max_energy, energy_c);
3112 
3113 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3114 	    "energy In A %d B %d C %d , and max %d\n",
3115 	    energy_a, energy_b, energy_c, max_energy);
3116 
3117 	return max_energy;
3118 }
3119 
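/*
 * Cache the PHY information from an IWM_REPLY_RX_PHY_CMD notification;
 * iwm_mvm_rx_rx_mpdu() consumes it for the MPDU which follows.
 */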
3120 static void
3121 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3122 {
3123 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3124 
3125 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3126 
3127 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3128 }
3129 
3130 /*
3131  * Retrieve the average noise (in dBm) among receivers.
3132  */
3133 static int
3134 iwm_get_noise(struct iwm_softc *sc,
3135     const struct iwm_mvm_statistics_rx_non_phy *stats)
3136 {
3137 	int i, total, nbant, noise;
3138 
3139 	total = nbant = noise = 0;
3140 	for (i = 0; i < 3; i++) {
3141 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3142 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3143 		    __func__,
3144 		    i,
3145 		    noise);
3146 
3147 		if (noise) {
3148 			total += noise;
3149 			nbant++;
3150 		}
3151 	}
3152 
3153 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3154 	    __func__, nbant, total);
3155 #if 0
3156 	/* There should be at least one antenna but check anyway. */
3157 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3158 #else
3159 	/* For now, just hard-code it to -96 to be safe */
3160 	return (-96);
3161 #endif
3162 }
3163 
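/* Cache the firmware statistics notification and refresh the noise floor. */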
3164 static void
3165 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3166 {
3167 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3168 
3169 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3170 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3171 }
3172 
3173 /*
3174  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3175  *
3176  * Handles the actual data of the Rx packet from the fw
3177  */
3178 static boolean_t
3179 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3180 	boolean_t stolen)
3181 {
3182 	struct ieee80211com *ic = &sc->sc_ic;
3183 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3184 	struct ieee80211_frame *wh;
3185 	struct ieee80211_node *ni;
3186 	struct ieee80211_rx_stats rxs;
3187 	struct iwm_rx_phy_info *phy_info;
3188 	struct iwm_rx_mpdu_res_start *rx_res;
3189 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3190 	uint32_t len;
3191 	uint32_t rx_pkt_status;
3192 	int rssi;
3193 
3194 	phy_info = &sc->sc_last_phy_info;
3195 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3196 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3197 	len = le16toh(rx_res->byte_count);
3198 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3199 
3200 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3201 		device_printf(sc->sc_dev,
3202 		    "dsp size out of range [0,20]: %d\n",
3203 		    phy_info->cfg_phy_cnt);
3204 		goto fail;
3205 	}
3206 
3207 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3208 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3209 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3210 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3211 		goto fail;
3212 	}
3213 
3214 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3215 
3216 	/* Map it to relative value */
3217 	rssi = rssi - sc->sc_noise;
3218 
3219 	/* replenish ring for the buffer we're going to feed to the sharks */
3220 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3221 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3222 		    __func__);
3223 		goto fail;
3224 	}
3225 
3226 	m->m_data = pkt->data + sizeof(*rx_res);
3227 	m->m_pkthdr.len = m->m_len = len;
3228 
3229 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3230 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3231 
3232 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3233 
3234 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3235 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3236 	    __func__,
3237 	    le16toh(phy_info->channel),
3238 	    le16toh(phy_info->phy_flags));
3239 
3240 	/*
3241 	 * Populate an RX state struct with the provided information.
3242 	 */
3243 	bzero(&rxs, sizeof(rxs));
3244 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3245 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3246 	rxs.c_ieee = le16toh(phy_info->channel);
	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3248 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3249 	} else {
3250 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3251 	}
3252 
3253 	/* rssi is in 1/2db units */
3254 	rxs.c_rssi = rssi * 2;
3255 	rxs.c_nf = sc->sc_noise;
3256 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3257 		if (ni)
3258 			ieee80211_free_node(ni);
3259 		goto fail;
3260 	}
3261 
3262 	if (ieee80211_radiotap_active_vap(vap)) {
3263 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3264 
3265 		tap->wr_flags = 0;
3266 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3267 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3268 		tap->wr_chan_freq = htole16(rxs.c_freq);
3269 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3270 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3271 		tap->wr_dbm_antsignal = (int8_t)rssi;
3272 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3273 		tap->wr_tsft = phy_info->system_timestamp;
3274 		switch (phy_info->rate) {
3275 		/* CCK rates. */
3276 		case  10: tap->wr_rate =   2; break;
3277 		case  20: tap->wr_rate =   4; break;
3278 		case  55: tap->wr_rate =  11; break;
3279 		case 110: tap->wr_rate =  22; break;
3280 		/* OFDM rates. */
3281 		case 0xd: tap->wr_rate =  12; break;
3282 		case 0xf: tap->wr_rate =  18; break;
3283 		case 0x5: tap->wr_rate =  24; break;
3284 		case 0x7: tap->wr_rate =  36; break;
3285 		case 0x9: tap->wr_rate =  48; break;
3286 		case 0xb: tap->wr_rate =  72; break;
3287 		case 0x1: tap->wr_rate =  96; break;
3288 		case 0x3: tap->wr_rate = 108; break;
3289 		/* Unknown rate: should not happen. */
3290 		default:  tap->wr_rate =   0;
3291 		}
3292 	}
3293 
3294 	IWM_UNLOCK(sc);
3295 	if (ni != NULL) {
3296 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3297 		ieee80211_input_mimo(ni, m);
3298 		ieee80211_free_node(ni);
3299 	} else {
3300 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3301 		ieee80211_input_mimo_all(ic, m);
3302 	}
3303 	IWM_LOCK(sc);
3304 
3305 	return TRUE;
3306 
3307 fail:
3308 	counter_u64_add(ic->ic_ierrors, 1);
3309 	return FALSE;
3310 }
3311 
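/*
 * Process the TX response for a single (non-aggregated) frame and
 * report the outcome to the net80211 rate control module.  Returns
 * nonzero when the frame was not successfully transmitted.
 */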
3312 static int
3313 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3314 	struct iwm_node *in)
3315 {
3316 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3317 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3318 	struct ieee80211_node *ni = &in->in_ni;
3319 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3320 
3321 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3322 
3323 	/* Update rate control statistics. */
3324 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3325 	    __func__,
3326 	    (int) le16toh(tx_resp->status.status),
3327 	    (int) le16toh(tx_resp->status.sequence),
3328 	    tx_resp->frame_count,
3329 	    tx_resp->bt_kill_count,
3330 	    tx_resp->failure_rts,
3331 	    tx_resp->failure_frame,
3332 	    le32toh(tx_resp->initial_rate),
3333 	    (int) le16toh(tx_resp->wireless_media_time));
3334 
3335 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3336 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3337 	txs->short_retries = tx_resp->failure_rts;
3338 	txs->long_retries = tx_resp->failure_frame;
3339 	if (status != IWM_TX_STATUS_SUCCESS &&
3340 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3341 		switch (status) {
3342 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3343 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3344 			break;
3345 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3346 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3347 			break;
3348 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3349 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3350 			break;
3351 		default:
3352 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3353 			break;
3354 		}
3355 	} else {
3356 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3357 	}
3358 	ieee80211_ratectl_tx_complete(ni, txs);
3359 
3360 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3361 }
3362 
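/*
 * IWM_TX_CMD response handler: complete the transmitted frame, release
 * its DMA mapping, and restart transmission once the ring drains below
 * the low watermark.
 */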
3363 static void
3364 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3365 {
3366 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3367 	int idx = cmd_hdr->idx;
3368 	int qid = cmd_hdr->qid;
3369 	struct iwm_tx_ring *ring = &sc->txq[qid];
3370 	struct iwm_tx_data *txd = &ring->data[idx];
3371 	struct iwm_node *in = txd->in;
3372 	struct mbuf *m = txd->m;
3373 	int status;
3374 
3375 	KASSERT(txd->done == 0, ("txd not done"));
3376 	KASSERT(txd->in != NULL, ("txd without node"));
3377 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3378 
3379 	sc->sc_tx_timer = 0;
3380 
3381 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3382 
3383 	/* Unmap and free mbuf. */
3384 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3385 	bus_dmamap_unload(ring->data_dmat, txd->map);
3386 
3387 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3388 	    "free txd %p, in %p\n", txd, txd->in);
3389 	txd->done = 1;
3390 	txd->m = NULL;
3391 	txd->in = NULL;
3392 
3393 	ieee80211_tx_complete(&in->in_ni, m, status);
3394 
3395 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3396 		sc->qfullmsk &= ~(1 << ring->qid);
3397 		if (sc->qfullmsk == 0) {
3398 			iwm_start(sc);
3399 		}
3400 	}
3401 }
3402 
3403 /*
3404  * transmit side
3405  */
3406 
/*
 * Process a "command done" firmware notification.  This is where we wake up
 * processes waiting for a synchronous command completion.
 * (Adapted from if_iwn.)
 */
3412 static void
3413 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3414 {
3415 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3416 	struct iwm_tx_data *data;
3417 
3418 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3419 		return;	/* Not a command ack. */
3420 	}
3421 
3422 	/* XXX wide commands? */
3423 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3424 	    "cmd notification type 0x%x qid %d idx %d\n",
3425 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3426 
3427 	data = &ring->data[pkt->hdr.idx];
3428 
3429 	/* If the command was mapped in an mbuf, free it. */
3430 	if (data->m != NULL) {
3431 		bus_dmamap_sync(ring->data_dmat, data->map,
3432 		    BUS_DMASYNC_POSTWRITE);
3433 		bus_dmamap_unload(ring->data_dmat, data->map);
3434 		m_freem(data->m);
3435 		data->m = NULL;
3436 	}
3437 	wakeup(&ring->desc[pkt->hdr.idx]);
3438 
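	/*
	 * Consistency check on the ring bookkeeping: if commands complete
	 * in order, the index of the completing command plus the number of
	 * commands still queued should equal the ring's write index, modulo
	 * the ring size.
	 */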
3439 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3440 		device_printf(sc->sc_dev,
3441 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3442 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3443 		/* XXX call iwm_force_nmi() */
3444 	}
3445 
3446 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3447 	ring->queued--;
3448 	if (ring->queued == 0)
3449 		iwm_pcie_clear_cmd_in_flight(sc);
3450 }
3451 
3452 #if 0
/*
 * Necessary only for block-ack mode.
 */
3456 void
3457 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3458 	uint16_t len)
3459 {
3460 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3461 	uint16_t w_val;
3462 
3463 	scd_bc_tbl = sc->sched_dma.vaddr;
3464 
3465 	len += 8; /* magic numbers came naturally from paris */
3466 	len = roundup(len, 4) / 4;
3467 
3468 	w_val = htole16(sta_id << 12 | len);
3469 
3470 	/* Update TX scheduler. */
3471 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3472 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3473 	    BUS_DMASYNC_PREWRITE);
3474 
	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries appear to be
	 * mirrored past IWM_TFD_QUEUE_SIZE_MAX, presumably so the
	 * hardware scheduler can read ahead past the end of the ring
	 * without wrapping.
	 */
3476 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3477 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3478 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3479 		    BUS_DMASYNC_PREWRITE);
3480 	}
3481 }
3482 #endif
3483 
/*
 * Take an 802.11 (non-11n) rate and find the matching rate
 * table entry.  Return the index into in_ridx[].
 *
 * The caller then uses that index back into in_ridx[]
 * to figure out the rate index programmed into
 * the firmware for this given node.
 */
3492 static int
3493 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3494     uint8_t rate)
3495 {
3496 	int i;
3497 	uint8_t r;
3498 
3499 	for (i = 0; i < nitems(in->in_ridx); i++) {
3500 		r = iwm_rates[in->in_ridx[i]].rate;
3501 		if (rate == r)
3502 			return (i);
3503 	}
3504 
3505 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3506 	    "%s: couldn't find an entry for rate=%d\n",
3507 	    __func__,
3508 	    rate);
3509 
3510 	/* XXX Return the first */
3511 	/* XXX TODO: have it return the /lowest/ */
3512 	return (0);
3513 }
3514 
3515 static int
3516 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3517 {
3518 	int i;
3519 
3520 	for (i = 0; i < nitems(iwm_rates); i++) {
3521 		if (iwm_rates[i].rate == rate)
3522 			return (i);
3523 	}
3524 	/* XXX error? */
3525 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3526 	    "%s: couldn't find an entry for rate=%d\n",
3527 	    __func__,
3528 	    rate);
3529 	return (0);
3530 }
3531 
3532 /*
3533  * Fill in the rate related information for a transmit command.
3534  */
3535 static const struct iwm_rate *
3536 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3537 	struct mbuf *m, struct iwm_tx_cmd *tx)
3538 {
3539 	struct ieee80211_node *ni = &in->in_ni;
3540 	struct ieee80211_frame *wh;
3541 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3542 	const struct iwm_rate *rinfo;
3543 	int type;
3544 	int ridx, rate_flags;
3545 
3546 	wh = mtod(m, struct ieee80211_frame *);
3547 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3548 
3549 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3550 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3551 
3552 	if (type == IEEE80211_FC0_TYPE_MGT ||
3553 	    type == IEEE80211_FC0_TYPE_CTL ||
3554 	    (m->m_flags & M_EAPOL) != 0) {
3555 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3556 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3557 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3558 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3559 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3560 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3561 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3562 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3563 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3564 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3565 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3566 	} else {
3567 		int i;
3568 
3569 		/* for data frames, use RS table */
3570 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3571 		/* XXX pass pktlen */
3572 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3573 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3574 		ridx = in->in_ridx[i];
3575 
3576 		/* This is the index into the programmed table */
3577 		tx->initial_rate_index = i;
3578 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3579 
3580 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3581 		    "%s: start with i=%d, txrate %d\n",
3582 		    __func__, i, iwm_rates[ridx].rate);
3583 	}
3584 
3585 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3586 	    "%s: frame type=%d txrate %d\n",
3587 	        __func__, type, iwm_rates[ridx].rate);
3588 
3589 	rinfo = &iwm_rates[ridx];
3590 
3591 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3592 	    __func__, ridx,
3593 	    rinfo->rate,
3594 	    !! (IWM_RIDX_IS_CCK(ridx))
3595 	    );
3596 
3597 	/* XXX TODO: hard-coded TX antenna? */
3598 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3599 	if (IWM_RIDX_IS_CCK(ridx))
3600 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3601 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3602 
3603 	return rinfo;
3604 }
3605 
3606 #define TB0_SIZE 16
3607 static int
3608 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3609 {
3610 	struct ieee80211com *ic = &sc->sc_ic;
3611 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3612 	struct iwm_node *in = IWM_NODE(ni);
3613 	struct iwm_tx_ring *ring;
3614 	struct iwm_tx_data *data;
3615 	struct iwm_tfd *desc;
3616 	struct iwm_device_cmd *cmd;
3617 	struct iwm_tx_cmd *tx;
3618 	struct ieee80211_frame *wh;
3619 	struct ieee80211_key *k = NULL;
3620 	struct mbuf *m1;
3621 	const struct iwm_rate *rinfo;
3622 	uint32_t flags;
3623 	u_int hdrlen;
3624 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3625 	int nsegs;
3626 	uint8_t tid, type;
3627 	int i, totlen, error, pad;
3628 
3629 	wh = mtod(m, struct ieee80211_frame *);
3630 	hdrlen = ieee80211_anyhdrsize(wh);
3631 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3632 	tid = 0;
3633 	ring = &sc->txq[ac];
3634 	desc = &ring->desc[ring->cur];
3635 	memset(desc, 0, sizeof(*desc));
3636 	data = &ring->data[ring->cur];
3637 
3638 	/* Fill out iwm_tx_cmd to send to the firmware */
3639 	cmd = &ring->cmd[ring->cur];
3640 	cmd->hdr.code = IWM_TX_CMD;
3641 	cmd->hdr.flags = 0;
3642 	cmd->hdr.qid = ring->qid;
3643 	cmd->hdr.idx = ring->cur;
3644 
3645 	tx = (void *)cmd->data;
3646 	memset(tx, 0, sizeof(*tx));
3647 
3648 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3649 
3650 	/* Encrypt the frame if need be. */
3651 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3652 		/* Retrieve key for TX && do software encryption. */
3653 		k = ieee80211_crypto_encap(ni, m);
3654 		if (k == NULL) {
3655 			m_freem(m);
3656 			return (ENOBUFS);
3657 		}
3658 		/* 802.11 header may have moved. */
3659 		wh = mtod(m, struct ieee80211_frame *);
3660 	}
3661 
3662 	if (ieee80211_radiotap_active_vap(vap)) {
3663 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3664 
3665 		tap->wt_flags = 0;
3666 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3667 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3668 		tap->wt_rate = rinfo->rate;
3669 		if (k != NULL)
3670 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3671 		ieee80211_radiotap_tx(vap, m);
3672 	}
3675 	totlen = m->m_pkthdr.len;
3676 
3677 	flags = 0;
3678 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3679 		flags |= IWM_TX_CMD_FLG_ACK;
3680 	}
3681 
3682 	if (type == IEEE80211_FC0_TYPE_DATA
3683 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3684 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3685 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3686 	}
3687 
3688 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3689 	    type != IEEE80211_FC0_TYPE_DATA)
3690 		tx->sta_id = sc->sc_aux_sta.sta_id;
3691 	else
3692 		tx->sta_id = IWM_STATION_ID;
3693 
3694 	if (type == IEEE80211_FC0_TYPE_MGT) {
3695 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3696 
3697 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3698 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3699 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3700 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3701 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3702 		} else {
3703 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3704 		}
3705 	} else {
3706 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3707 	}
3708 
3709 	if (hdrlen & 3) {
3710 		/* First segment length must be a multiple of 4. */
3711 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3712 		pad = 4 - (hdrlen & 3);
3713 	} else
3714 		pad = 0;
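	/*
	 * For example, a QoS data header is 26 bytes, so hdrlen & 3 == 2
	 * and the header is padded with 2 bytes to reach a 4-byte boundary.
	 */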
3715 
3716 	tx->driver_txop = 0;
3717 	tx->next_frame_len = 0;
3718 
3719 	tx->len = htole16(totlen);
3720 	tx->tid_tspec = tid;
3721 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3722 
3723 	/* Set physical address of "scratch area". */
3724 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3725 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3726 
3727 	/* Copy 802.11 header in TX command. */
3728 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3729 
3730 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3731 
3732 	tx->sec_ctl = 0;
3733 	tx->tx_flags |= htole32(flags);
3734 
3735 	/* Trim 802.11 header. */
3736 	m_adj(m, hdrlen);
3737 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3738 	    segs, &nsegs, BUS_DMA_NOWAIT);
3739 	if (error != 0) {
3740 		if (error != EFBIG) {
3741 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3742 			    error);
3743 			m_freem(m);
3744 			return error;
3745 		}
3746 		/* Too many DMA segments, linearize mbuf. */
3747 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3748 		if (m1 == NULL) {
3749 			device_printf(sc->sc_dev,
3750 			    "%s: could not defrag mbuf\n", __func__);
3751 			m_freem(m);
3752 			return (ENOBUFS);
3753 		}
3754 		m = m1;
3755 
3756 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3757 		    segs, &nsegs, BUS_DMA_NOWAIT);
3758 		if (error != 0) {
3759 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3760 			    error);
3761 			m_freem(m);
3762 			return error;
3763 		}
3764 	}
3765 	data->m = m;
3766 	data->in = in;
3767 	data->done = 0;
3768 
3769 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3770 	    "sending txd %p, in %p\n", data, data->in);
3771 	KASSERT(data->in != NULL, ("node is NULL"));
3772 
3773 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3774 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3775 	    ring->qid, ring->cur, totlen, nsegs,
3776 	    le32toh(tx->tx_flags),
3777 	    le32toh(tx->rate_n_flags),
3778 	    tx->initial_rate_index
3779 	    );
3780 
3781 	/* Fill TX descriptor. */
3782 	desc->num_tbs = 2 + nsegs;
3783 
3784 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3785 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3786 	    (TB0_SIZE << 4);
3787 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3788 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3789 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3790 	      + hdrlen + pad - TB0_SIZE) << 4);
3791 
3792 	/* Other DMA segments are for data payload. */
3793 	for (i = 0; i < nsegs; i++) {
3794 		seg = &segs[i];
3795 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3796 		desc->tbs[i+2].hi_n_len = \
3797 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3798 		    | ((seg->ds_len) << 4);
3799 	}
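	/*
	 * In each TB entry above, hi_n_len packs the upper 4 bits of the
	 * 36-bit DMA address into its low nibble and the buffer length
	 * into the remaining 12 bits, which is why the lengths are
	 * shifted left by 4.
	 */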
3800 
3801 	bus_dmamap_sync(ring->data_dmat, data->map,
3802 	    BUS_DMASYNC_PREWRITE);
3803 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3804 	    BUS_DMASYNC_PREWRITE);
3805 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3806 	    BUS_DMASYNC_PREWRITE);
3807 
3808 #if 0
3809 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3810 #endif
3811 
3812 	/* Kick TX ring. */
3813 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3814 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3815 
3816 	/* Mark TX ring as full if we reach a certain threshold. */
3817 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3818 		sc->qfullmsk |= 1 << ring->qid;
3819 	}
3820 
3821 	return 0;
3822 }
3823 
3824 static int
3825 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3826     const struct ieee80211_bpf_params *params)
3827 {
3828 	struct ieee80211com *ic = ni->ni_ic;
3829 	struct iwm_softc *sc = ic->ic_softc;
3830 	int error = 0;
3831 
3832 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3833 	    "->%s begin\n", __func__);
3834 
3835 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3836 		m_freem(m);
3837 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3838 		    "<-%s not RUNNING\n", __func__);
3839 		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX fix this: the bpf params are currently ignored. */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
3853 }
3854 
3855 /*
3856  * mvm/tx.c
3857  */
3858 
3859 /*
3860  * Note that there are transports that buffer frames before they reach
3861  * the firmware. This means that after flush_tx_path is called, the
3862  * queue might not be empty. The race-free way to handle this is to:
3863  * 1) set the station as draining
3864  * 2) flush the Tx path
3865  * 3) wait for the transport queues to be empty
3866  */
3867 int
3868 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3869 {
3870 	int ret;
3871 	struct iwm_tx_path_flush_cmd flush_cmd = {
3872 		.queues_ctl = htole32(tfd_msk),
3873 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3874 	};
3875 
3876 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3877 	    sizeof(flush_cmd), &flush_cmd);
3878 	if (ret)
		device_printf(sc->sc_dev,
3880 		    "Flushing tx queue failed: %d\n", ret);
3881 	return ret;
3882 }
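/*
 * A minimal sketch of the race-free drain sequence described above, assuming
 * helpers in the style of iwlwifi's drain/wait calls (this driver currently
 * performs only the flush step; see iwm_release()):
 *
 *	iwm_mvm_drain_sta(sc, in, TRUE);		   // 1) set station draining
 *	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC); // 2) flush the Tx path
 *	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);	   // 3) wait for queues to empty
 */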
3883 
3884 /*
3885  * BEGIN mvm/quota.c
3886  */
3887 
3888 static int
3889 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3890 {
3891 	struct iwm_time_quota_cmd cmd;
3892 	int i, idx, ret, num_active_macs, quota, quota_rem;
3893 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3894 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3895 	uint16_t id;
3896 
3897 	memset(&cmd, 0, sizeof(cmd));
3898 
3899 	/* currently, PHY ID == binding ID */
3900 	if (ivp) {
3901 		id = ivp->phy_ctxt->id;
3902 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3903 		colors[id] = ivp->phy_ctxt->color;
3904 
		n_ifs[id] = 1;
3907 	}
3908 
	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments.  Divide these fragments
	 * equally between all the bindings that require quota.
	 */
3914 	num_active_macs = 0;
3915 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3916 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3917 		num_active_macs += n_ifs[i];
3918 	}
3919 
3920 	quota = 0;
3921 	quota_rem = 0;
3922 	if (num_active_macs) {
3923 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3924 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3925 	}
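	/*
	 * E.g. with one active binding, quota = IWM_MVM_MAX_QUOTA and
	 * quota_rem = 0; with (hypothetically) three active MACs each
	 * binding would get a third of the session, and the remainder
	 * is handed to the first binding below.
	 */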
3926 
3927 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3928 		if (colors[i] < 0)
3929 			continue;
3930 
3931 		cmd.quotas[idx].id_and_color =
3932 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3933 
3934 		if (n_ifs[i] <= 0) {
3935 			cmd.quotas[idx].quota = htole32(0);
3936 			cmd.quotas[idx].max_duration = htole32(0);
3937 		} else {
3938 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3939 			cmd.quotas[idx].max_duration = htole32(0);
3940 		}
3941 		idx++;
3942 	}
3943 
3944 	/* Give the remainder of the session to the first binding */
3945 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3946 
3947 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3948 	    sizeof(cmd), &cmd);
3949 	if (ret)
3950 		device_printf(sc->sc_dev,
3951 		    "%s: Failed to send quota: %d\n", __func__, ret);
3952 	return ret;
3953 }
3954 
3955 /*
3956  * END mvm/quota.c
3957  */
3958 
3959 /*
3960  * ieee80211 routines
3961  */
3962 
3963 /*
3964  * Change to AUTH state in 80211 state machine.  Roughly matches what
3965  * Linux does in bss_info_changed().
3966  */
3967 static int
3968 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3969 {
3970 	struct ieee80211_node *ni;
3971 	struct iwm_node *in;
3972 	struct iwm_vap *iv = IWM_VAP(vap);
3973 	uint32_t duration;
3974 	int error;
3975 
3976 	/*
3977 	 * XXX i have a feeling that the vap node is being
3978 	 * freed from underneath us. Grr.
3979 	 */
3980 	ni = ieee80211_ref_node(vap->iv_bss);
3981 	in = IWM_NODE(ni);
3982 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3983 	    "%s: called; vap=%p, bss ni=%p\n",
3984 	    __func__,
3985 	    vap,
3986 	    ni);
3987 
3988 	in->in_assoc = 0;
3989 
	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16.  We can't avoid connecting altogether, so refuse the
	 * station state change; this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_supplicant
	 * will blacklist the AP...
	 */
3997 	if (ni->ni_intval < 16) {
3998 		device_printf(sc->sc_dev,
3999 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4000 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4001 		error = EINVAL;
4002 		goto out;
4003 	}
4004 
4005 	error = iwm_allow_mcast(vap, sc);
4006 	if (error) {
4007 		device_printf(sc->sc_dev,
4008 		    "%s: failed to set multicast\n", __func__);
4009 		goto out;
4010 	}
4011 
4012 	/*
4013 	 * This is where it deviates from what Linux does.
4014 	 *
4015 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4016 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4017 	 * and always does a mac_ctx_changed().
4018 	 *
	 * The OpenBSD port doesn't attempt to do that - it resets things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (i.e., we never reset
4023 	 * the NIC except for a firmware failure, which should drag
4024 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4025 	 * contexts that are required), let's do a dirty hack here.
4026 	 */
4027 	if (iv->is_uploaded) {
4028 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4029 			device_printf(sc->sc_dev,
4030 			    "%s: failed to update MAC\n", __func__);
4031 			goto out;
4032 		}
4033 	} else {
4034 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4035 			device_printf(sc->sc_dev,
4036 			    "%s: failed to add MAC\n", __func__);
4037 			goto out;
4038 		}
4039 	}
4040 
4041 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4042 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4043 		device_printf(sc->sc_dev,
4044 		    "%s: failed update phy ctxt\n", __func__);
4045 		goto out;
4046 	}
4047 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4048 
4049 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add binding\n", __func__);
4052 		goto out;
4053 	}
4054 	/*
4055 	 * Authentication becomes unreliable when powersaving is left enabled
4056 	 * here. Powersaving will be activated again when association has
4057 	 * finished or is aborted.
4058 	 */
4059 	iv->ps_disabled = TRUE;
4060 	error = iwm_mvm_power_update_mac(sc);
4061 	iv->ps_disabled = FALSE;
4062 	if (error != 0) {
4063 		device_printf(sc->sc_dev,
4064 		    "%s: failed to update power management\n",
4065 		    __func__);
4066 		goto out;
4067 	}
4068 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4069 		device_printf(sc->sc_dev,
4070 		    "%s: failed to add sta\n", __func__);
4071 		goto out;
4072 	}
4073 
4074 	/*
4075 	 * Prevent the FW from wandering off channel during association
4076 	 * by "protecting" the session with a time event.
4077 	 */
4078 	/* XXX duration is in units of TU, not MS */
4079 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4080 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4081 	DELAY(100);
4082 
4083 	error = 0;
4084 out:
4085 	ieee80211_free_node(ni);
4086 	return (error);
4087 }
4088 
4089 static int
4090 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4091 {
4092 	uint32_t tfd_msk;
4093 
4094 	/*
4095 	 * Ok, so *technically* the proper set of calls for going
4096 	 * from RUN back to SCAN is:
4097 	 *
4098 	 * iwm_mvm_power_mac_disable(sc, in);
4099 	 * iwm_mvm_mac_ctxt_changed(sc, vap);
4100 	 * iwm_mvm_rm_sta(sc, in);
4101 	 * iwm_mvm_update_quotas(sc, NULL);
4102 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4103 	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4104 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4105 	 *
	 * However, that freezes the device no matter which permutations
4107 	 * and modifications are attempted.  Obviously, this driver is missing
4108 	 * something since it works in the Linux driver, but figuring out what
4109 	 * is missing is a little more complicated.  Now, since we're going
4110 	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
4112 	 */
4113 	/*
4114 	 * Just using 0xf for the queues mask is fine as long as we only
4115 	 * get here from RUN state.
4116 	 */
4117 	tfd_msk = 0xf;
4118 	iwm_xmit_queue_drain(sc);
4119 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4120 	/*
4121 	 * We seem to get away with just synchronously sending the
4122 	 * IWM_TXPATH_FLUSH command.
4123 	 */
4124 //	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4125 	iwm_stop_device(sc);
4126 	iwm_init_hw(sc);
4127 	if (in)
4128 		in->in_assoc = 0;
4129 	return 0;
4130 
4131 #if 0
4132 	int error;
4133 
4134 	iwm_mvm_power_mac_disable(sc, in);
4135 
4136 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4137 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4138 		return error;
4139 	}
4140 
4141 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4142 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4143 		return error;
4144 	}
4145 	error = iwm_mvm_rm_sta(sc, in);
4146 	in->in_assoc = 0;
4147 	iwm_mvm_update_quotas(sc, NULL);
4148 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4149 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4150 		return error;
4151 	}
4152 	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4153 
4154 	iwm_mvm_mac_ctxt_remove(sc, in);
4155 
4156 	return error;
4157 #endif
4158 }
4159 
4160 static struct ieee80211_node *
4161 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4162 {
4163 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4164 	    M_NOWAIT | M_ZERO);
4165 }
4166 
4167 uint8_t
4168 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4169 {
4170 	int i;
4171 	uint8_t rval;
4172 
4173 	for (i = 0; i < rs->rs_nrates; i++) {
4174 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4175 		if (rval == iwm_rates[ridx].rate)
4176 			return rs->rs_rates[i];
4177 	}
4178 
4179 	return 0;
4180 }
4181 
4182 static void
4183 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4184 {
4185 	struct ieee80211_node *ni = &in->in_ni;
4186 	struct iwm_lq_cmd *lq = &in->in_lq;
4187 	int nrates = ni->ni_rates.rs_nrates;
4188 	int i, ridx, tab = 0;
4189 //	int txant = 0;
4190 
4191 	if (nrates > nitems(lq->rs_table)) {
4192 		device_printf(sc->sc_dev,
4193 		    "%s: node supports %d rates, driver handles "
4194 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4195 		return;
4196 	}
4197 	if (nrates == 0) {
4198 		device_printf(sc->sc_dev,
4199 		    "%s: node supports 0 rates, odd!\n", __func__);
4200 		return;
4201 	}
4202 
4203 	/*
4204 	 * XXX .. and most of iwm_node is not initialised explicitly;
4205 	 * it's all just 0x0 passed to the firmware.
4206 	 */
4207 
4208 	/* first figure out which rates we should support */
4209 	/* XXX TODO: this isn't 11n aware /at all/ */
4210 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4211 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4212 	    "%s: nrates=%d\n", __func__, nrates);
4213 
4214 	/*
4215 	 * Loop over nrates and populate in_ridx from the highest
4216 	 * rate to the lowest rate.  Remember, in_ridx[] has
4217 	 * IEEE80211_RATE_MAXSIZE entries!
4218 	 */
4219 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4220 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4221 
4222 		/* Map 802.11 rate to HW rate index. */
4223 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4224 			if (iwm_rates[ridx].rate == rate)
4225 				break;
4226 		if (ridx > IWM_RIDX_MAX) {
4227 			device_printf(sc->sc_dev,
4228 			    "%s: WARNING: device rate for %d not found!\n",
4229 			    __func__, rate);
4230 		} else {
4231 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4232 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4233 			    __func__,
4234 			    i,
4235 			    rate,
4236 			    ridx);
4237 			in->in_ridx[i] = ridx;
4238 		}
4239 	}
4240 
4241 	/* then construct a lq_cmd based on those */
4242 	memset(lq, 0, sizeof(*lq));
4243 	lq->sta_id = IWM_STATION_ID;
4244 
4245 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4246 	if (ni->ni_flags & IEEE80211_NODE_HT)
4247 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4248 
	/*
	 * Are these used?  (We don't do SISO or MIMO.)
	 * They need to be set to non-zero, though, or we get an error.
	 */
4253 	lq->single_stream_ant_msk = 1;
4254 	lq->dual_stream_ant_msk = 1;
4255 
	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the TX antenna.
	 * Note that we add the rates highest rate first
	 * (opposite of ni_rates).
	 */
4264 	/*
4265 	 * XXX TODO: this should be looping over the min of nrates
4266 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4267 	 */
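	/*
	 * For example, a CCK 11 Mb/s entry with the first antenna
	 * selected works out to tab = iwm_rates[ridx].plcp |
	 * (1 << IWM_RATE_MCS_ANT_POS) | IWM_RATE_MCS_CCK_MSK, i.e. the
	 * PLCP value, the antenna bit and the CCK flag OR'ed together.
	 */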
4268 	for (i = 0; i < nrates; i++) {
4269 		int nextant;
4270 
4271 #if 0
4272 		if (txant == 0)
4273 			txant = iwm_mvm_get_valid_tx_ant(sc);
4274 		nextant = 1<<(ffs(txant)-1);
4275 		txant &= ~nextant;
4276 #else
4277 		nextant = iwm_mvm_get_valid_tx_ant(sc);
4278 #endif
4279 		/*
4280 		 * Map the rate id into a rate index into
4281 		 * our hardware table containing the
4282 		 * configuration to use for this rate.
4283 		 */
4284 		ridx = in->in_ridx[i];
4285 		tab = iwm_rates[ridx].plcp;
4286 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4287 		if (IWM_RIDX_IS_CCK(ridx))
4288 			tab |= IWM_RATE_MCS_CCK_MSK;
4289 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4290 		    "station rate i=%d, rate=%d, hw=%x\n",
4291 		    i, iwm_rates[ridx].rate, tab);
4292 		lq->rs_table[i] = htole32(tab);
4293 	}
4294 	/* then fill the rest with the lowest possible rate */
4295 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4296 		KASSERT(tab != 0, ("invalid tab"));
4297 		lq->rs_table[i] = htole32(tab);
4298 	}
4299 }
4300 
4301 static int
4302 iwm_media_change(struct ifnet *ifp)
4303 {
4304 	struct ieee80211vap *vap = ifp->if_softc;
4305 	struct ieee80211com *ic = vap->iv_ic;
4306 	struct iwm_softc *sc = ic->ic_softc;
4307 	int error;
4308 
4309 	error = ieee80211_media_change(ifp);
4310 	if (error != ENETRESET)
4311 		return error;
4312 
4313 	IWM_LOCK(sc);
4314 	if (ic->ic_nrunning > 0) {
4315 		iwm_stop(sc);
4316 		iwm_init(sc);
4317 	}
4318 	IWM_UNLOCK(sc);
4319 	return error;
4320 }
4323 static int
4324 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4325 {
4326 	struct iwm_vap *ivp = IWM_VAP(vap);
4327 	struct ieee80211com *ic = vap->iv_ic;
4328 	struct iwm_softc *sc = ic->ic_softc;
4329 	struct iwm_node *in;
4330 	int error;
4331 
4332 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4333 	    "switching state %s -> %s\n",
4334 	    ieee80211_state_name[vap->iv_state],
4335 	    ieee80211_state_name[nstate]);
4336 	IEEE80211_UNLOCK(ic);
4337 	IWM_LOCK(sc);
4338 
4339 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4340 		iwm_led_blink_stop(sc);
4341 
4342 	/* disable beacon filtering if we're hopping out of RUN */
4343 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4344 		iwm_mvm_disable_beacon_filter(sc);
4345 
4346 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4347 			in->in_assoc = 0;
4348 
4349 		if (nstate == IEEE80211_S_INIT) {
4350 			IWM_UNLOCK(sc);
4351 			IEEE80211_LOCK(ic);
4352 			error = ivp->iv_newstate(vap, nstate, arg);
4353 			IEEE80211_UNLOCK(ic);
4354 			IWM_LOCK(sc);
4355 			iwm_release(sc, NULL);
4356 			IWM_UNLOCK(sc);
4357 			IEEE80211_LOCK(ic);
4358 			return error;
4359 		}
4360 
4361 		/*
4362 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4363 		 * above then the card will be completely reinitialized,
4364 		 * so the driver must do everything necessary to bring the card
4365 		 * from INIT to SCAN.
4366 		 *
4367 		 * Additionally, upon receiving deauth frame from AP,
4368 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4369 		 * state. This will also fail with this driver, so bring the FSM
4370 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4371 		 *
4372 		 * XXX TODO: fix this for FreeBSD!
4373 		 */
4374 		if (nstate == IEEE80211_S_SCAN ||
4375 		    nstate == IEEE80211_S_AUTH ||
4376 		    nstate == IEEE80211_S_ASSOC) {
4377 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4378 			    "Force transition to INIT; MGT=%d\n", arg);
4379 			IWM_UNLOCK(sc);
4380 			IEEE80211_LOCK(ic);
4381 			/* Always pass arg as -1 since we can't Tx right now. */
4382 			/*
4383 			 * XXX arg is just ignored anyway when transitioning
4384 			 *     to IEEE80211_S_INIT.
4385 			 */
4386 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4387 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4388 			    "Going INIT->SCAN\n");
4389 			nstate = IEEE80211_S_SCAN;
4390 			IEEE80211_UNLOCK(ic);
4391 			IWM_LOCK(sc);
4392 		}
4393 	}
4394 
4395 	switch (nstate) {
4396 	case IEEE80211_S_INIT:
4397 	case IEEE80211_S_SCAN:
4398 		if (vap->iv_state == IEEE80211_S_AUTH ||
4399 		    vap->iv_state == IEEE80211_S_ASSOC) {
4400 			int myerr;
4401 			IWM_UNLOCK(sc);
4402 			IEEE80211_LOCK(ic);
4403 			myerr = ivp->iv_newstate(vap, nstate, arg);
4404 			IEEE80211_UNLOCK(ic);
4405 			IWM_LOCK(sc);
4406 			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
4424 			ivp->phy_ctxt = NULL;
4425 			error = iwm_mvm_power_update_mac(sc);
4426 			if (error != 0) {
4427 				device_printf(sc->sc_dev,
4428 				    "%s: failed to update power management\n",
4429 				    __func__);
4430 			}
4431 			IWM_UNLOCK(sc);
4432 			IEEE80211_LOCK(ic);
4433 			return myerr;
4434 		}
4435 		break;
4436 
4437 	case IEEE80211_S_AUTH:
4438 		if ((error = iwm_auth(vap, sc)) != 0) {
4439 			device_printf(sc->sc_dev,
4440 			    "%s: could not move to auth state: %d\n",
4441 			    __func__, error);
4442 		}
4443 		break;
4444 
4445 	case IEEE80211_S_ASSOC:
4446 		/*
4447 		 * EBS may be disabled due to previous failures reported by FW.
4448 		 * Reset EBS status here assuming environment has been changed.
4449 		 */
4450 		sc->last_ebs_successful = TRUE;
4451 		break;
4452 
4453 	case IEEE80211_S_RUN:
4454 		in = IWM_NODE(vap->iv_bss);
		/*
		 * Update the association state, now that we have it all.
		 * (e.g. the associd comes in at this point)
		 */
4457 		error = iwm_mvm_update_sta(sc, in);
4458 		if (error != 0) {
4459 			device_printf(sc->sc_dev,
4460 			    "%s: failed to update STA\n", __func__);
4461 			IWM_UNLOCK(sc);
4462 			IEEE80211_LOCK(ic);
4463 			return error;
4464 		}
4465 		in->in_assoc = 1;
4466 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4467 		if (error != 0) {
4468 			device_printf(sc->sc_dev,
4469 			    "%s: failed to update MAC: %d\n", __func__, error);
4470 		}
4471 
4472 		iwm_mvm_sf_update(sc, vap, FALSE);
4473 		iwm_mvm_enable_beacon_filter(sc, ivp);
4474 		iwm_mvm_power_update_mac(sc);
4475 		iwm_mvm_update_quotas(sc, ivp);
4476 		iwm_setrates(sc, in);
4477 
4478 		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4479 			device_printf(sc->sc_dev,
4480 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4481 		}
4482 
4483 		iwm_mvm_led_enable(sc);
4484 		break;
4485 
4486 	default:
4487 		break;
4488 	}
4489 	IWM_UNLOCK(sc);
4490 	IEEE80211_LOCK(ic);
4491 
4492 	return (ivp->iv_newstate(vap, nstate, arg));
4493 }
4494 
4495 void
4496 iwm_endscan_cb(void *arg, int pending)
4497 {
4498 	struct iwm_softc *sc = arg;
4499 	struct ieee80211com *ic = &sc->sc_ic;
4500 
4501 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4502 	    "%s: scan ended\n",
4503 	    __func__);
4504 
4505 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4506 }
4507 
4508 static int
4509 iwm_send_bt_init_conf(struct iwm_softc *sc)
4510 {
4511 	struct iwm_bt_coex_cmd bt_cmd;
4512 
4513 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4514 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4515 
4516 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4517 	    &bt_cmd);
4518 }
4519 
4520 static boolean_t
4521 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4522 {
4523 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4524 	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4525 					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4526 
4527 	if (iwm_lar_disable)
4528 		return FALSE;
4529 
4530 	/*
4531 	 * Enable LAR only if it is supported by the FW (TLV) &&
4532 	 * enabled in the NVM
4533 	 */
4534 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4535 		return nvm_lar && tlv_lar;
4536 	else
4537 		return tlv_lar;
4538 }
4539 
4540 static boolean_t
4541 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4542 {
4543 	return fw_has_api(&sc->ucode_capa,
4544 			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4545 	       fw_has_capa(&sc->ucode_capa,
4546 			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4547 }
4548 
4549 static int
4550 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4551 {
4552 	struct iwm_mcc_update_cmd mcc_cmd;
4553 	struct iwm_host_cmd hcmd = {
4554 		.id = IWM_MCC_UPDATE_CMD,
4555 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4556 		.data = { &mcc_cmd },
4557 	};
4558 	int ret;
4559 #ifdef IWM_DEBUG
4560 	struct iwm_rx_packet *pkt;
4561 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4562 	struct iwm_mcc_update_resp *mcc_resp;
4563 	int n_channels;
4564 	uint16_t mcc;
4565 #endif
4566 	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4567 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4568 
4569 	if (!iwm_mvm_is_lar_supported(sc)) {
4570 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4571 		    __func__);
4572 		return 0;
4573 	}
4574 
4575 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4576 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
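	/* E.g. alpha2 "ZZ" encodes as 0x5a5a and "00" as 0x3030. */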
4577 	if (iwm_mvm_is_wifi_mcc_supported(sc))
4578 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4579 	else
4580 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4581 
4582 	if (resp_v2)
4583 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4584 	else
4585 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4586 
4587 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4588 	    "send MCC update to FW with '%c%c' src = %d\n",
4589 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4590 
4591 	ret = iwm_send_cmd(sc, &hcmd);
4592 	if (ret)
4593 		return ret;
4594 
4595 #ifdef IWM_DEBUG
4596 	pkt = hcmd.resp_pkt;
4597 
4598 	/* Extract MCC response */
4599 	if (resp_v2) {
4600 		mcc_resp = (void *)pkt->data;
4601 		mcc = mcc_resp->mcc;
4602 		n_channels =  le32toh(mcc_resp->n_channels);
4603 	} else {
4604 		mcc_resp_v1 = (void *)pkt->data;
4605 		mcc = mcc_resp_v1->mcc;
4606 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4607 	}
4608 
4609 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4610 	if (mcc == 0)
4611 		mcc = 0x3030;  /* "00" - world */
4612 
4613 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4614 	    "regulatory domain '%c%c' (%d channels available)\n",
4615 	    mcc >> 8, mcc & 0xff, n_channels);
4616 #endif
4617 	iwm_free_resp(sc, &hcmd);
4618 
4619 	return 0;
4620 }
4621 
4622 static void
4623 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4624 {
4625 	struct iwm_host_cmd cmd = {
4626 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4627 		.len = { sizeof(uint32_t), },
4628 		.data = { &backoff, },
4629 	};
4630 
4631 	if (iwm_send_cmd(sc, &cmd) != 0) {
4632 		device_printf(sc->sc_dev,
4633 		    "failed to change thermal tx backoff\n");
4634 	}
4635 }
4636 
4637 static int
4638 iwm_init_hw(struct iwm_softc *sc)
4639 {
4640 	struct ieee80211com *ic = &sc->sc_ic;
4641 	int error, i, ac;
4642 
4643 	sc->sf_state = IWM_SF_UNINIT;
4644 
4645 	if ((error = iwm_start_hw(sc)) != 0) {
4646 		printf("iwm_start_hw: failed %d\n", error);
4647 		return error;
4648 	}
4649 
4650 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4651 		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4652 		return error;
4653 	}
4654 
	/*
	 * Stop and restart the HW since the INIT
	 * image just loaded.
	 */
4659 	iwm_stop_device(sc);
4660 	sc->sc_ps_disabled = FALSE;
4661 	if ((error = iwm_start_hw(sc)) != 0) {
4662 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4663 		return error;
4664 	}
4665 
	/* Restart, this time with the regular firmware. */
4667 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4668 	if (error) {
4669 		device_printf(sc->sc_dev, "could not load firmware\n");
4670 		goto error;
4671 	}
4672 
4673 	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4674 	if (error)
4675 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4676 
4677 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4678 		device_printf(sc->sc_dev, "bt init conf failed\n");
4679 		goto error;
4680 	}
4681 
4682 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4683 	if (error != 0) {
4684 		device_printf(sc->sc_dev, "antenna config failed\n");
4685 		goto error;
4686 	}
4687 
4688 	/* Send phy db control command and then phy db calibration */
4689 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4690 		goto error;
4691 
4692 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4693 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4694 		goto error;
4695 	}
4696 
4697 	/* Add auxiliary station for scanning */
4698 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4699 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4700 		goto error;
4701 	}
4702 
4703 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4704 		/*
4705 		 * The channel used here isn't relevant as it's
4706 		 * going to be overwritten in the other flows.
4707 		 * For now use the first channel we have.
4708 		 */
4709 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4710 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4711 			goto error;
4712 	}
4713 
4714 	/* Initialize tx backoffs to the minimum. */
4715 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4716 		iwm_mvm_tt_tx_backoff(sc, 0);
4717 
4718 	error = iwm_mvm_power_update_device(sc);
4719 	if (error)
4720 		goto error;
4721 
4722 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4723 		goto error;
4724 
4725 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4726 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4727 			goto error;
4728 	}
4729 
4730 	/* Enable Tx queues. */
4731 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4732 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4733 		    iwm_mvm_ac_to_tx_fifo[ac]);
4734 		if (error)
4735 			goto error;
4736 	}
4737 
4738 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4739 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4740 		goto error;
4741 	}
4742 
4743 	return 0;
4744 
4745  error:
4746 	iwm_stop_device(sc);
4747 	return error;
4748 }
4749 
4750 /* Allow multicast from our BSSID. */
4751 static int
4752 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4753 {
4754 	struct ieee80211_node *ni = vap->iv_bss;
4755 	struct iwm_mcast_filter_cmd *cmd;
4756 	size_t size;
4757 	int error;
4758 
4759 	size = roundup(sizeof(*cmd), 4);
4760 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4761 	if (cmd == NULL)
4762 		return ENOMEM;
4763 	cmd->filter_own = 1;
4764 	cmd->port_id = 0;
4765 	cmd->count = 0;
4766 	cmd->pass_all = 1;
4767 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4768 
4769 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4770 	    IWM_CMD_SYNC, size, cmd);
4771 	free(cmd, M_DEVBUF);
4772 
4773 	return (error);
4774 }
4775 
4776 /*
4777  * ifnet interfaces
4778  */
4779 
4780 static void
4781 iwm_init(struct iwm_softc *sc)
4782 {
4783 	int error;
4784 
4785 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4786 		return;
4787 	}
4788 	sc->sc_generation++;
4789 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4790 
4791 	if ((error = iwm_init_hw(sc)) != 0) {
4792 		printf("iwm_init_hw failed %d\n", error);
4793 		iwm_stop(sc);
4794 		return;
4795 	}
4796 
	/*
	 * OK, firmware loaded and we are running.
	 */
4800 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4801 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4802 }
4803 
4804 static int
4805 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4806 {
4807 	struct iwm_softc *sc;
4808 	int error;
4809 
4810 	sc = ic->ic_softc;
4811 
4812 	IWM_LOCK(sc);
4813 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4814 		IWM_UNLOCK(sc);
4815 		return (ENXIO);
4816 	}
4817 	error = mbufq_enqueue(&sc->sc_snd, m);
4818 	if (error) {
4819 		IWM_UNLOCK(sc);
4820 		return (error);
4821 	}
4822 	iwm_start(sc);
4823 	IWM_UNLOCK(sc);
4824 	return (0);
4825 }
4826 
4827 /*
4828  * Dequeue packets from sendq and call send.
4829  */
4830 static void
4831 iwm_start(struct iwm_softc *sc)
4832 {
4833 	struct ieee80211_node *ni;
4834 	struct mbuf *m;
4835 	int ac = 0;
4836 
4837 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4838 	while (sc->qfullmsk == 0 &&
4839 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4840 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4841 		if (iwm_tx(sc, m, ni, ac) != 0) {
4842 			if_inc_counter(ni->ni_vap->iv_ifp,
4843 			    IFCOUNTER_OERRORS, 1);
4844 			ieee80211_free_node(ni);
4845 			continue;
4846 		}
4847 		sc->sc_tx_timer = 15;
4848 	}
4849 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4850 }
4851 
4852 static void
4853 iwm_stop(struct iwm_softc *sc)
4854 {
4855 
4856 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4857 	sc->sc_flags |= IWM_FLAG_STOPPED;
4858 	sc->sc_generation++;
4859 	iwm_led_blink_stop(sc);
4860 	sc->sc_tx_timer = 0;
4861 	iwm_stop_device(sc);
4862 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4863 }
4864 
4865 static void
4866 iwm_watchdog(void *arg)
4867 {
4868 	struct iwm_softc *sc = arg;
4869 	struct ieee80211com *ic = &sc->sc_ic;
4870 
4871 	if (sc->sc_tx_timer > 0) {
4872 		if (--sc->sc_tx_timer == 0) {
4873 			device_printf(sc->sc_dev, "device timeout\n");
4874 #ifdef IWM_DEBUG
4875 			iwm_nic_error(sc);
4876 #endif
4877 			ieee80211_restart_all(ic);
4878 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4879 			return;
4880 		}
4881 	}
4882 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4883 }
4884 
4885 static void
4886 iwm_parent(struct ieee80211com *ic)
4887 {
4888 	struct iwm_softc *sc = ic->ic_softc;
4889 	int startall = 0;
4890 
4891 	IWM_LOCK(sc);
4892 	if (ic->ic_nrunning > 0) {
4893 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4894 			iwm_init(sc);
4895 			startall = 1;
4896 		}
4897 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4898 		iwm_stop(sc);
4899 	IWM_UNLOCK(sc);
4900 	if (startall)
4901 		ieee80211_start_all(ic);
4902 }
4903 
4904 /*
4905  * The interrupt side of things
4906  */
4907 
4908 /*
4909  * error dumping routines are from iwlwifi/mvm/utils.c
4910  */
4911 
4912 /*
4913  * Note: This structure is read from the device with IO accesses,
4914  * and the reading already does the endian conversion. As it is
4915  * read with uint32_t-sized accesses, any members with a different size
4916  * need to be ordered correctly though!
4917  */
4918 struct iwm_error_event_table {
4919 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4920 	uint32_t error_id;		/* type of error */
4921 	uint32_t trm_hw_status0;	/* TRM HW status */
4922 	uint32_t trm_hw_status1;	/* TRM HW status */
4923 	uint32_t blink2;		/* branch link */
4924 	uint32_t ilink1;		/* interrupt link */
4925 	uint32_t ilink2;		/* interrupt link */
4926 	uint32_t data1;		/* error-specific data */
4927 	uint32_t data2;		/* error-specific data */
4928 	uint32_t data3;		/* error-specific data */
4929 	uint32_t bcon_time;		/* beacon timer */
4930 	uint32_t tsf_low;		/* network timestamp function timer */
4931 	uint32_t tsf_hi;		/* network timestamp function timer */
4932 	uint32_t gp1;		/* GP1 timer register */
4933 	uint32_t gp2;		/* GP2 timer register */
4934 	uint32_t fw_rev_type;	/* firmware revision type */
4935 	uint32_t major;		/* uCode version major */
4936 	uint32_t minor;		/* uCode version minor */
4937 	uint32_t hw_ver;		/* HW Silicon version */
4938 	uint32_t brd_ver;		/* HW board version */
4939 	uint32_t log_pc;		/* log program counter */
4940 	uint32_t frame_ptr;		/* frame pointer */
4941 	uint32_t stack_ptr;		/* stack pointer */
4942 	uint32_t hcmd;		/* last host command header */
4943 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4944 				 * rxtx_flag */
4945 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4946 				 * host_flag */
4947 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4948 				 * enc_flag */
4949 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4950 				 * time_flag */
4951 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4952 				 * wico interrupt */
4953 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4954 	uint32_t wait_event;		/* wait event() caller address */
4955 	uint32_t l2p_control;	/* L2pControlField */
4956 	uint32_t l2p_duration;	/* L2pDurationField */
4957 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4958 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* the date and time of the
				 * compilation */
4963 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4964 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4965 
4966 /*
4967  * UMAC error struct - relevant starting from family 8000 chip.
4968  * Note: This structure is read from the device with IO accesses,
4969  * and the reading already does the endian conversion. As it is
4970  * read with u32-sized accesses, any members with a different size
4971  * need to be ordered correctly though!
4972  */
4973 struct iwm_umac_error_event_table {
4974 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4975 	uint32_t error_id;	/* type of error */
4976 	uint32_t blink1;	/* branch link */
4977 	uint32_t blink2;	/* branch link */
4978 	uint32_t ilink1;	/* interrupt link */
4979 	uint32_t ilink2;	/* interrupt link */
4980 	uint32_t data1;		/* error-specific data */
4981 	uint32_t data2;		/* error-specific data */
4982 	uint32_t data3;		/* error-specific data */
4983 	uint32_t umac_major;
4984 	uint32_t umac_minor;
4985 	uint32_t frame_pointer;	/* core register 27*/
4986 	uint32_t stack_pointer;	/* core register 28 */
4987 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4988 	uint32_t nic_isr_pref;	/* ISR status register */
4989 } __packed;
4990 
4991 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4992 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4993 
4994 #ifdef IWM_DEBUG
4995 struct {
4996 	const char *name;
4997 	uint8_t num;
4998 } advanced_lookup[] = {
4999 	{ "NMI_INTERRUPT_WDG", 0x34 },
5000 	{ "SYSASSERT", 0x35 },
5001 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5002 	{ "BAD_COMMAND", 0x38 },
5003 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5004 	{ "FATAL_ERROR", 0x3D },
5005 	{ "NMI_TRM_HW_ERR", 0x46 },
5006 	{ "NMI_INTERRUPT_TRM", 0x4C },
5007 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5008 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5009 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5010 	{ "NMI_INTERRUPT_HOST", 0x66 },
5011 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5012 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5013 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5014 	{ "ADVANCED_SYSASSERT", 0 },
5015 };
5016 
5017 static const char *
5018 iwm_desc_lookup(uint32_t num)
5019 {
5020 	int i;
5021 
5022 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5023 		if (advanced_lookup[i].num == num)
5024 			return advanced_lookup[i].name;
5025 
5026 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5027 	return advanced_lookup[i].name;
5028 }
5029 
5030 static void
5031 iwm_nic_umac_error(struct iwm_softc *sc)
5032 {
5033 	struct iwm_umac_error_event_table table;
5034 	uint32_t base;
5035 
5036 	base = sc->umac_error_event_table;
5037 
5038 	if (base < 0x800000) {
5039 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5040 		    base);
5041 		return;
5042 	}
5043 
5044 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5045 		device_printf(sc->sc_dev, "reading errlog failed\n");
5046 		return;
5047 	}
5048 
5049 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5050 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5051 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5052 		    sc->sc_flags, table.valid);
5053 	}
5054 
5055 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5056 		iwm_desc_lookup(table.error_id));
5057 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5058 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5059 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5060 	    table.ilink1);
5061 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5062 	    table.ilink2);
5063 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5064 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5065 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5066 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5067 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5068 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5069 	    table.frame_pointer);
5070 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5071 	    table.stack_pointer);
5072 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5073 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5074 	    table.nic_isr_pref);
5075 }
5076 
5077 /*
5078  * Support for dumping the error log seemed like a good idea ...
5079  * but it's mostly hex junk and the only sensible thing is the
5080  * hw/ucode revision (which we know anyway).  Since it's here,
5081  * I'll just leave it in, just in case e.g. the Intel guys want to
5082  * help us decipher some "ADVANCED_SYSASSERT" later.
5083  */
5084 static void
5085 iwm_nic_error(struct iwm_softc *sc)
5086 {
5087 	struct iwm_error_event_table table;
5088 	uint32_t base;
5089 
5090 	device_printf(sc->sc_dev, "dumping device error log\n");
5091 	base = sc->error_event_table;
5092 	if (base < 0x800000) {
5093 		device_printf(sc->sc_dev,
5094 		    "Invalid error log pointer 0x%08x\n", base);
5095 		return;
5096 	}
5097 
5098 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5099 		device_printf(sc->sc_dev, "reading errlog failed\n");
5100 		return;
5101 	}
5102 
5103 	if (!table.valid) {
5104 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5105 		return;
5106 	}
5107 
5108 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5109 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5110 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5111 		    sc->sc_flags, table.valid);
5112 	}
5113 
5114 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5115 	    iwm_desc_lookup(table.error_id));
5116 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5117 	    table.trm_hw_status0);
5118 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5119 	    table.trm_hw_status1);
5120 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5121 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5122 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5123 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5124 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5125 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5126 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5127 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5128 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5129 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5130 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5131 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5132 	    table.fw_rev_type);
5133 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5134 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5135 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5136 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5137 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5138 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5139 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5140 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5141 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5142 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5143 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5144 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5145 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5146 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5147 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5148 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5149 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5150 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5151 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5152 
5153 	if (sc->umac_error_event_table)
5154 		iwm_nic_umac_error(sc);
5155 }
5156 #endif
5157 
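/*
 * Process one RX buffer, which the firmware may have packed with
 * several consecutive packets, each aligned to
 * IWM_FH_RSCSR_FRAME_ALIGN.  The walk stops at the first empty or
 * invalid header, or when the remaining space can no longer hold a
 * status word plus a command header.
 */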
5158 static void
5159 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5160 {
5161 	struct ieee80211com *ic = &sc->sc_ic;
5162 	struct iwm_cmd_response *cresp;
5163 	struct mbuf *m1;
5164 	uint32_t offset = 0;
5165 	uint32_t maxoff = IWM_RBUF_SIZE;
5166 	uint32_t nextoff;
5167 	boolean_t stolen = FALSE;
5168 
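	/*
	 * HAVEROOM() checks whether a status word plus a command header
	 * still fit between offset (a) and the end of the RX buffer.
	 */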
5169 #define HAVEROOM(a)	\
5170     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5171 
5172 	while (HAVEROOM(offset)) {
5173 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5174 		    offset);
5175 		int qid, idx, code, len;
5176 
5177 		qid = pkt->hdr.qid;
5178 		idx = pkt->hdr.idx;
5179 
5180 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5181 
5182 		/*
5183 		 * We randomly get these from the firmware; no idea why.
5184 		 * They at least seem harmless, so just ignore them for now.
5185 		 */
5186 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5187 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5188 			break;
5189 		}
5190 
5191 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5192 		    "rx packet qid=%d idx=%d type=%x\n",
5193 		    qid & ~0x80, pkt->hdr.idx, code);
5194 
5195 		len = iwm_rx_packet_len(pkt);
5196 		len += sizeof(uint32_t); /* account for status word */
5197 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
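		/*
		 * Framing sketch: if hdr+payload occupy 0x30 bytes, the
		 * status word brings the total to 0x34, and roundup2()
		 * places the next packet 0x40 bytes further on
		 * (IWM_FH_RSCSR_FRAME_ALIGN being 0x40).
		 */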
5198 
5199 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5200 
5201 		switch (code) {
5202 		case IWM_REPLY_RX_PHY_CMD:
5203 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5204 			break;
5205 
5206 		case IWM_REPLY_RX_MPDU_CMD: {
5207 			/*
5208 			 * If this is the last frame in the RX buffer, we
5209 			 * can directly feed the mbuf to the sharks here.
5210 			 */
5211 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5212 			    struct iwm_rx_packet *, nextoff);
5213 			if (!HAVEROOM(nextoff) ||
5214 			    (nextpkt->hdr.code == 0 &&
5215 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5216 			     nextpkt->hdr.idx == 0) ||
5217 			    (nextpkt->len_n_flags ==
5218 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5219 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5220 					stolen = FALSE;
5221 					/* Make sure we abort the loop */
5222 					nextoff = maxoff;
5223 				}
5224 				break;
5225 			}
5226 
5227 			/*
5228 			 * Use m_copym instead of m_split, because that
5229 			 * makes it easier to keep a valid rx buffer in
5230 			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5231 			 *
5232 			 * We need to start m_copym() at offset 0, to get the
5233 			 * M_PKTHDR flag preserved.
5234 			 */
5235 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5236 			if (m1) {
5237 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5238 					stolen = TRUE;
5239 				else
5240 					m_freem(m1);
5241 			}
5242 			break;
5243 		}
5244 
5245 		case IWM_TX_CMD:
5246 			iwm_mvm_rx_tx_cmd(sc, pkt);
5247 			break;
5248 
5249 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5250 			struct iwm_missed_beacons_notif *resp;
5251 			int missed;
5252 
5253 			/* XXX look at mac_id to determine interface ID */
5254 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5255 
5256 			resp = (void *)pkt->data;
5257 			missed = le32toh(resp->consec_missed_beacons);
5258 
5259 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5260 			    "%s: MISSED_BEACON: mac_id=%d, "
5261 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5262 			    "num_rx=%d\n",
5263 			    __func__,
5264 			    le32toh(resp->mac_id),
5265 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5266 			    le32toh(resp->consec_missed_beacons),
5267 			    le32toh(resp->num_expected_beacons),
5268 			    le32toh(resp->num_recvd_beacons));
5269 
5270 			/* Be paranoid */
5271 			if (vap == NULL)
5272 				break;
5273 
5274 			/* XXX no net80211 locking? */
5275 			if (vap->iv_state == IEEE80211_S_RUN &&
5276 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5277 				if (missed > vap->iv_bmissthreshold) {
5278 					/* XXX bad locking; turn into task */
5279 					IWM_UNLOCK(sc);
5280 					ieee80211_beacon_miss(ic);
5281 					IWM_LOCK(sc);
5282 				}
5283 			}
5284 
5285 			break;
5286 		}
5287 
5288 		case IWM_MFUART_LOAD_NOTIFICATION:
5289 			break;
5290 
5291 		case IWM_MVM_ALIVE:
5292 			break;
5293 
5294 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5295 			break;
5296 
5297 		case IWM_STATISTICS_NOTIFICATION:
5298 			iwm_mvm_handle_rx_statistics(sc, pkt);
5299 			break;
5300 
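		/*
		 * Synchronous command plumbing: a sender waiting for a
		 * reply records the expected ((qid << 16) | idx) in
		 * sc_wantresp, and the matching response is copied into
		 * sc_cmd_resp for the waiter to pick up.
		 */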
5301 		case IWM_NVM_ACCESS_CMD:
5302 		case IWM_MCC_UPDATE_CMD:
5303 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5304 				memcpy(sc->sc_cmd_resp,
5305 				    pkt, sizeof(sc->sc_cmd_resp));
5306 			}
5307 			break;
5308 
5309 		case IWM_MCC_CHUB_UPDATE_CMD: {
5310 			struct iwm_mcc_chub_notif *notif;
5311 			notif = (void *)pkt->data;
5312 
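			/*
			 * The MCC is two ASCII letters packed into
			 * 16 bits, e.g. 0x5553 decodes to "US".
			 */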
5313 			sc->sc_fw_mcc[0] = (le16toh(notif->mcc) & 0xff00) >> 8;
5314 			sc->sc_fw_mcc[1] = le16toh(notif->mcc) & 0xff;
5315 			sc->sc_fw_mcc[2] = '\0';
5316 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5317 			    "fw source %d sent CC '%s'\n",
5318 			    notif->source_id, sc->sc_fw_mcc);
5319 			break;
5320 		}
5321 
5322 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5323 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5324 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5325 			struct iwm_dts_measurement_notif_v1 *notif;
5326 
5327 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5328 				device_printf(sc->sc_dev,
5329 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5330 				break;
5331 			}
5332 			notif = (void *)pkt->data;
5333 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5334 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5335 			    notif->temp);
5336 			break;
5337 		}
5338 
5339 		case IWM_PHY_CONFIGURATION_CMD:
5340 		case IWM_TX_ANT_CONFIGURATION_CMD:
5341 		case IWM_ADD_STA:
5342 		case IWM_MAC_CONTEXT_CMD:
5343 		case IWM_REPLY_SF_CFG_CMD:
5344 		case IWM_POWER_TABLE_CMD:
5345 		case IWM_PHY_CONTEXT_CMD:
5346 		case IWM_BINDING_CONTEXT_CMD:
5347 		case IWM_TIME_EVENT_CMD:
5348 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5349 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5350 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5351 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5352 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5353 		case IWM_REPLY_BEACON_FILTERING_CMD:
5354 		case IWM_MAC_PM_POWER_TABLE:
5355 		case IWM_TIME_QUOTA_CMD:
5356 		case IWM_REMOVE_STA:
5357 		case IWM_TXPATH_FLUSH:
5358 		case IWM_LQ_CMD:
5359 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5360 				 IWM_FW_PAGING_BLOCK_CMD):
5361 		case IWM_BT_CONFIG:
5362 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5363 			cresp = (void *)pkt->data;
5364 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5365 				memcpy(sc->sc_cmd_resp,
5366 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5367 			}
5368 			break;
5369 
5370 		/* ignore */
5371 		case IWM_PHY_DB_CMD:
5372 			break;
5373 
5374 		case IWM_INIT_COMPLETE_NOTIF:
5375 			break;
5376 
5377 		case IWM_SCAN_OFFLOAD_COMPLETE:
5378 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5379 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5380 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5381 				ieee80211_runtask(ic, &sc->sc_es_task);
5382 			}
5383 			break;
5384 
5385 		case IWM_SCAN_ITERATION_COMPLETE:
5386 			/* The notification payload is currently unused. */
5387 			break;
5390 
5391 		case IWM_SCAN_COMPLETE_UMAC:
5392 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5393 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5394 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5395 				ieee80211_runtask(ic, &sc->sc_es_task);
5396 			}
5397 			break;
5398 
5399 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5400 			struct iwm_umac_scan_iter_complete_notif *notif;
5401 			notif = (void *)pkt->data;
5402 
5403 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5404 			    "complete, status=0x%x, %d channels scanned\n",
5405 			    notif->status, notif->scanned_channels);
5406 			break;
5407 		}
5408 
5409 		case IWM_REPLY_ERROR: {
5410 			struct iwm_error_resp *resp;
5411 			resp = (void *)pkt->data;
5412 
5413 			device_printf(sc->sc_dev,
5414 			    "firmware error 0x%x, cmd 0x%x\n",
5415 			    le32toh(resp->error_type),
5416 			    resp->cmd_id);
5417 			break;
5418 		}
5419 
5420 		case IWM_TIME_EVENT_NOTIFICATION: {
5421 			struct iwm_time_event_notif *notif;
5422 			notif = (void *)pkt->data;
5423 
5424 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5425 			    "TE notif status = 0x%x action = 0x%x\n",
5426 			    notif->status, notif->action);
5427 			break;
5428 		}
5429 
5430 		/*
5431 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5432 		 * messages. Just ignore them for now.
5433 		 */
5434 		case IWM_DEBUG_LOG_MSG:
5435 			break;
5436 
5437 		case IWM_MCAST_FILTER_CMD:
5438 			break;
5439 
5440 		case IWM_SCD_QUEUE_CFG: {
5441 			struct iwm_scd_txq_cfg_rsp *rsp;
5442 			rsp = (void *)pkt->data;
5443 
5444 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5445 			    "queue cfg token=0x%x sta_id=%d "
5446 			    "tid=%d scd_queue=%d\n",
5447 			    rsp->token, rsp->sta_id, rsp->tid,
5448 			    rsp->scd_queue);
5449 			break;
5450 		}
5451 
5452 		default:
5453 			device_printf(sc->sc_dev,
5454 			    "frame %d/%d %x UNHANDLED (this should "
5455 			    "not happen)\n", qid & ~0x80, idx,
5456 			    pkt->len_n_flags);
5457 			break;
5458 		}
5459 
5460 		/*
5461 		 * Why test bit 0x80?  The Linux driver:
5462 		 *
5463 		 * There is one exception:  uCode sets bit 15 when it
5464 		 * originates the response/notification, i.e. when the
5465 		 * response/notification is not a direct response to a
5466 		 * command sent by the driver.  For example, uCode issues
5467 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5468 		 * it is not a direct response to any driver command.
5469 		 *
5470 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5471 		 * uses a slightly different format for pkt->hdr, and "qid"
5472 		 * is actually the upper byte of a two-byte field.
5473 		 */
5474 		if (!(qid & (1 << 7)))
5475 			iwm_cmd_done(sc, pkt);
5476 
5477 		offset = nextoff;
5478 	}
5479 	if (stolen)
5480 		m_freem(m);
5481 #undef HAVEROOM
5482 }
5483 
5484 /*
5485  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5486  * Basic structure from if_iwn
5487  */
5488 static void
5489 iwm_notif_intr(struct iwm_softc *sc)
5490 {
5491 	uint16_t hw;
5492 
5493 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5494 	    BUS_DMASYNC_POSTREAD);
5495 
5496 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5497 
5498 	/*
5499 	 * Process responses
5500 	 */
5501 	while (sc->rxq.cur != hw) {
5502 		struct iwm_rx_ring *ring = &sc->rxq;
5503 		struct iwm_rx_data *data = &ring->data[ring->cur];
5504 
5505 		bus_dmamap_sync(ring->data_dmat, data->map,
5506 		    BUS_DMASYNC_POSTREAD);
5507 
5508 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5509 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5510 		iwm_handle_rxb(sc, data->m);
5511 
5512 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5513 	}
5514 
5515 	/*
5516 	 * Tell the firmware that it can reuse the ring entries that
5517 	 * we have just processed.
5518 	 * The hardware seems to get upset unless the write pointer
5519 	 * is aligned down to a multiple of 8.
5520 	 */
5521 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5522 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5523 }
5524 
5525 static void
5526 iwm_intr(void *arg)
5527 {
5528 	struct iwm_softc *sc = arg;
5529 	int handled = 0;
5530 	int r1, r2, rv = 0;
5531 	int isperiodic = 0;
5532 
5533 	IWM_LOCK(sc);
5534 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5535 
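	/*
	 * With ICT enabled, the device DMAs interrupt causes into a
	 * table in host memory, so we can gather them without reading
	 * IWM_CSR_INT; walk the table until the first zero entry.
	 */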
5536 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5537 		uint32_t *ict = sc->ict_dma.vaddr;
5538 		int tmp;
5539 
5540 		tmp = le32toh(ict[sc->ict_cur]);
5541 		if (!tmp)
5542 			goto out_ena;
5543 
5544 		/*
5545 		 * ok, there was something.  keep plowing until we have all.
5546 		 */
5547 		r1 = r2 = 0;
5548 		while (tmp) {
5549 			r1 |= tmp;
5550 			ict[sc->ict_cur] = 0;
5551 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5552 			tmp = le32toh(ict[sc->ict_cur]);
5553 		}
5554 
5555 		/* this is where the fun begins.  don't ask */
5556 		if (r1 == 0xffffffff)
5557 			r1 = 0;
5558 
5559 		/* i am not expected to understand this */
5560 		if (r1 & 0xc0000)
5561 			r1 |= 0x8000;
5562 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
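		/*
		 * ICT entries store a compressed form of the
		 * IWM_CSR_INT bits: the low byte corresponds to bits
		 * 0-7 and the next byte to bits 16-23, which is what
		 * the expression above unpacks.
		 */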
5563 	} else {
5564 		r1 = IWM_READ(sc, IWM_CSR_INT);
5565 		/* "hardware gone" (where, fishing?) */
5566 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5567 			goto out;
5568 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5569 	}
5570 	if (r1 == 0 && r2 == 0) {
5571 		goto out_ena;
5572 	}
5573 
5574 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5575 
5576 	/* Safely ignore these bits for debug checks below */
5577 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5578 
5579 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5580 		int i;
5581 		struct ieee80211com *ic = &sc->sc_ic;
5582 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5583 
5584 #ifdef IWM_DEBUG
5585 		iwm_nic_error(sc);
5586 #endif
5587 		/* Dump driver status (TX and RX rings) while we're here. */
5588 		device_printf(sc->sc_dev, "driver status:\n");
5589 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5590 			struct iwm_tx_ring *ring = &sc->txq[i];
5591 			device_printf(sc->sc_dev,
5592 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5593 			    "queued=%-3d\n",
5594 			    i, ring->qid, ring->cur, ring->queued);
5595 		}
5596 		device_printf(sc->sc_dev,
5597 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5598 		device_printf(sc->sc_dev,
5599 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5600 
5601 		/* Don't stop the device; just do a VAP restart */
5602 		IWM_UNLOCK(sc);
5603 
5604 		if (vap == NULL) {
5605 			printf("%s: null vap\n", __func__);
5606 			return;
5607 		}
5608 
5609 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5610 		    "restarting\n", __func__, vap->iv_state);
5611 
5612 		ieee80211_restart_all(ic);
5613 		return;
5614 	}
5615 
5616 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5617 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5618 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5619 		iwm_stop(sc);
5620 		rv = 1;
5621 		goto out;
5622 	}
5623 
5624 	/* firmware chunk loaded */
5625 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5626 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5627 		handled |= IWM_CSR_INT_BIT_FH_TX;
5628 		sc->sc_fw_chunk_done = 1;
5629 		wakeup(&sc->sc_fw);
5630 	}
5631 
5632 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5633 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5634 		if (iwm_check_rfkill(sc)) {
5635 			device_printf(sc->sc_dev,
5636 			    "%s: rfkill switch, disabling interface\n",
5637 			    __func__);
5638 			iwm_stop(sc);
5639 		}
5640 	}
5641 
5642 	/*
5643 	 * The Linux driver uses periodic interrupts to avoid races.
5644 	 * We cargo-cult like it's going out of fashion.
5645 	 */
5646 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5647 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5648 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5649 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5650 			IWM_WRITE_1(sc,
5651 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5652 		isperiodic = 1;
5653 	}
5654 
5655 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5656 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5657 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5658 
5659 		iwm_notif_intr(sc);
5660 
5661 		/* enable periodic interrupt, see above */
5662 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) && !isperiodic)
5663 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5664 			    IWM_CSR_INT_PERIODIC_ENA);
5665 	}
5666 
5667 	if (__predict_false(r1 & ~handled))
5668 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5669 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5670 	rv = 1;
5671 
5672  out_ena:
5673 	iwm_restore_interrupts(sc);
5674  out:
5675 	IWM_UNLOCK(sc);
5676 	return;
5677 }
5678 
5679 /*
5680  * Autoconf glue-sniffing
5681  */
5682 #define	PCI_VENDOR_INTEL		0x8086
5683 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5684 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5685 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5686 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5687 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5688 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5689 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5690 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5691 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5692 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5693 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5694 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5695 
5696 static const struct iwm_devices {
5697 	uint16_t		device;
5698 	const struct iwm_cfg	*cfg;
5699 } iwm_devices[] = {
5700 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5701 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5702 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5703 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5704 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5705 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5706 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5707 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5708 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5709 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5710 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5711 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5712 };
5713 
5714 static int
5715 iwm_probe(device_t dev)
5716 {
5717 	int i;
5718 
5719 	for (i = 0; i < nitems(iwm_devices); i++) {
5720 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5721 		    pci_get_device(dev) == iwm_devices[i].device) {
5722 			device_set_desc(dev, iwm_devices[i].cfg->name);
5723 			return (BUS_PROBE_DEFAULT);
5724 		}
5725 	}
5726 
5727 	return (ENXIO);
5728 }
5729 
5730 static int
5731 iwm_dev_check(device_t dev)
5732 {
5733 	struct iwm_softc *sc;
5734 	uint16_t devid;
5735 	int i;
5736 
5737 	sc = device_get_softc(dev);
5738 
5739 	devid = pci_get_device(dev);
5740 	for (i = 0; i < nitems(iwm_devices); i++) {
5741 		if (iwm_devices[i].device == devid) {
5742 			sc->cfg = iwm_devices[i].cfg;
5743 			return (0);
5744 		}
5745 	}
5746 	device_printf(dev, "unknown adapter type\n");
5747 	return (ENXIO);
5748 }
5749 
5750 /* PCI registers */
5751 #define PCI_CFG_RETRY_TIMEOUT	0x041
5752 
5753 static int
5754 iwm_pci_attach(device_t dev)
5755 {
5756 	struct iwm_softc *sc;
5757 	int count, error, rid;
5758 	uint16_t reg;
5759 
5760 	sc = device_get_softc(dev);
5761 
5762 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5763 	 * PCI Tx retries from interfering with C3 CPU state. */
5764 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5765 
5766 	/* Enable bus-mastering and hardware bug workaround. */
5767 	pci_enable_busmaster(dev);
5768 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5769 	/* if !MSI */
5770 	if (reg & PCIM_STATUS_INTxSTATE) {
5771 		reg &= ~PCIM_STATUS_INTxSTATE;
5772 	}
5773 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5774 
5775 	rid = PCIR_BAR(0);
5776 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5777 	    RF_ACTIVE);
5778 	if (sc->sc_mem == NULL) {
5779 		device_printf(sc->sc_dev, "can't map mem space\n");
5780 		return (ENXIO);
5781 	}
5782 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5783 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5784 
5785 	/* Install interrupt handler. */
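	/*
	 * Prefer a single MSI vector (rid 1); if MSI allocation fails,
	 * fall back to the shared legacy INTx line (rid 0).
	 */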
5786 	count = 1;
5787 	rid = 0;
5788 	if (pci_alloc_msi(dev, &count) == 0)
5789 		rid = 1;
5790 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5791 	    (rid != 0 ? 0 : RF_SHAREABLE));
5792 	if (sc->sc_irq == NULL) {
5793 		device_printf(dev, "can't map interrupt\n");
5794 		return (ENXIO);
5795 	}
5796 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5797 	    NULL, iwm_intr, sc, &sc->sc_ih);
5798 	if (error != 0) {
5799 		device_printf(dev, "can't establish interrupt\n");
5800 		return (ENXIO);
5801 	}
5802 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5803 
5804 	return (0);
5805 }
5806 
5807 static void
5808 iwm_pci_detach(device_t dev)
5809 {
5810 	struct iwm_softc *sc = device_get_softc(dev);
5811 
5812 	if (sc->sc_irq != NULL) {
5813 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5814 		bus_release_resource(dev, SYS_RES_IRQ,
5815 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5816 		pci_release_msi(dev);
5817 	}
5818 	if (sc->sc_mem != NULL)
5819 		bus_release_resource(dev, SYS_RES_MEMORY,
5820 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5821 }
5822 
5825 static int
5826 iwm_attach(device_t dev)
5827 {
5828 	struct iwm_softc *sc = device_get_softc(dev);
5829 	struct ieee80211com *ic = &sc->sc_ic;
5830 	int error;
5831 	int txq_i, i;
5832 
5833 	sc->sc_dev = dev;
5834 	sc->sc_attached = 1;
5835 	IWM_LOCK_INIT(sc);
5836 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5837 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5838 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5839 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5840 
5841 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5842 	if (sc->sc_notif_wait == NULL) {
5843 		device_printf(dev, "failed to init notification wait struct\n");
5844 		goto fail;
5845 	}
5846 
5847 	sc->sf_state = IWM_SF_UNINIT;
5848 
5849 	/* Init phy db */
5850 	sc->sc_phy_db = iwm_phy_db_init(sc);
5851 	if (!sc->sc_phy_db) {
5852 		device_printf(dev, "Cannot init phy_db\n");
5853 		goto fail;
5854 	}
5855 
5856 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5857 	sc->last_ebs_successful = TRUE;
5858 
5859 	/* PCI attach */
5860 	error = iwm_pci_attach(dev);
5861 	if (error != 0)
5862 		goto fail;
5863 
5864 	sc->sc_wantresp = -1;
5865 
5866 	/* Check device type */
5867 	error = iwm_dev_check(dev);
5868 	if (error != 0)
5869 		goto fail;
5870 
5871 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5872 	/*
5873 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5874 	 * changed, and now the revision step also includes bit 0-1 (no more
5875 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5876 	 * in the old format.
5877 	 */
5878 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5879 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5880 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
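	/*
	 * Sketch of the repacking (assuming IWM_CSR_HW_REV_STEP()
	 * extracts bits 2-3): the new-format step in bits 0-1 is moved
	 * up into bits 2-3, where the old format kept the step value.
	 */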
5881 
5882 	if (iwm_prepare_card_hw(sc) != 0) {
5883 		device_printf(dev, "could not initialize hardware\n");
5884 		goto fail;
5885 	}
5886 
5887 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5888 		int ret;
5889 		uint32_t hw_step;
5890 
5891 		/*
5892 		 * In order to recognize C step the driver should read the
5893 		 * chip version id located at the AUX bus MISC address.
5894 		 */
5895 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5896 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5897 		DELAY(2);
5898 
5899 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5900 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5901 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5902 				   25000);
5903 		if (!ret) {
5904 			device_printf(sc->sc_dev,
5905 			    "Failed to wake up the nic\n");
5906 			goto fail;
5907 		}
5908 
5909 		if (iwm_nic_lock(sc)) {
5910 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5911 			hw_step |= IWM_ENABLE_WFPM;
5912 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5913 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5914 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5915 			if (hw_step == 0x3)
5916 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5917 						(IWM_SILICON_C_STEP << 2);
5918 			iwm_nic_unlock(sc);
5919 		} else {
5920 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5921 			goto fail;
5922 		}
5923 	}
5924 
5925 	/* special-case 7265D, it has the same PCI IDs. */
5926 	if (sc->cfg == &iwm7265_cfg &&
5927 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5928 		sc->cfg = &iwm7265d_cfg;
5929 	}
5930 
5931 	/* Allocate DMA memory for firmware transfers. */
5932 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5933 		device_printf(dev, "could not allocate memory for firmware\n");
5934 		goto fail;
5935 	}
5936 
5937 	/* Allocate "Keep Warm" page. */
5938 	if ((error = iwm_alloc_kw(sc)) != 0) {
5939 		device_printf(dev, "could not allocate keep warm page\n");
5940 		goto fail;
5941 	}
5942 
5943 	/* We use ICT interrupts */
5944 	if ((error = iwm_alloc_ict(sc)) != 0) {
5945 		device_printf(dev, "could not allocate ICT table\n");
5946 		goto fail;
5947 	}
5948 
5949 	/* Allocate TX scheduler "rings". */
5950 	if ((error = iwm_alloc_sched(sc)) != 0) {
5951 		device_printf(dev, "could not allocate TX scheduler rings\n");
5952 		goto fail;
5953 	}
5954 
5955 	/* Allocate TX rings */
5956 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5957 		if ((error = iwm_alloc_tx_ring(sc,
5958 		    &sc->txq[txq_i], txq_i)) != 0) {
5959 			device_printf(dev,
5960 			    "could not allocate TX ring %d\n",
5961 			    txq_i);
5962 			goto fail;
5963 		}
5964 	}
5965 
5966 	/* Allocate RX ring. */
5967 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5968 		device_printf(dev, "could not allocate RX ring\n");
5969 		goto fail;
5970 	}
5971 
5972 	/* Clear pending interrupts. */
5973 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5974 
5975 	ic->ic_softc = sc;
5976 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5977 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5978 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5979 
5980 	/* Set device capabilities. */
5981 	ic->ic_caps =
5982 	    IEEE80211_C_STA |
5983 	    IEEE80211_C_WPA |		/* WPA/RSN */
5984 	    IEEE80211_C_WME |
5985 	    IEEE80211_C_PMGT |
5986 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5987 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5988 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5989 	    ;
5990 	/* Advertise full-offload scanning */
5991 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
5992 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5993 		sc->sc_phyctxt[i].id = i;
5994 		sc->sc_phyctxt[i].color = 0;
5995 		sc->sc_phyctxt[i].ref = 0;
5996 		sc->sc_phyctxt[i].channel = NULL;
5997 	}
5998 
5999 	/* Default noise floor */
6000 	sc->sc_noise = -96;
6001 
6002 	/* Max RSSI */
6003 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6004 
6005 	sc->sc_preinit_hook.ich_func = iwm_preinit;
6006 	sc->sc_preinit_hook.ich_arg = sc;
6007 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6008 		device_printf(dev, "config_intrhook_establish failed\n");
6009 		goto fail;
6010 	}
6011 
6012 #ifdef IWM_DEBUG
6013 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6014 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6015 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6016 #endif
6017 
6018 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6019 	    "<-%s\n", __func__);
6020 
6021 	return 0;
6022 
6023 	/* Free allocated memory if something failed during attachment. */
6024 fail:
6025 	iwm_detach_local(sc, 0);
6026 
6027 	return ENXIO;
6028 }
6029 
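/*
 * A MAC address is usable as a station address if it is neither
 * multicast (low bit of the first octet set) nor all-zero.
 */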
6030 static int
6031 iwm_is_valid_ether_addr(uint8_t *addr)
6032 {
6033 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6034 
6035 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6036 		return (FALSE);
6037 
6038 	return (TRUE);
6039 }
6040 
6041 static int
6042 iwm_wme_update(struct ieee80211com *ic)
6043 {
6044 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
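	/* For example, ECWmin = 4 yields CWmin = (1 << 4) - 1 = 15. */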
6045 	struct iwm_softc *sc = ic->ic_softc;
6046 	struct chanAccParams chp;
6047 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6048 	struct iwm_vap *ivp = IWM_VAP(vap);
6049 	struct iwm_node *in;
6050 	struct wmeParams tmp[WME_NUM_AC];
6051 	int aci, error;
6052 
6053 	if (vap == NULL)
6054 		return (0);
6055 
6056 	ieee80211_wme_ic_getparams(ic, &chp);
6057 
6058 	IEEE80211_LOCK(ic);
6059 	for (aci = 0; aci < WME_NUM_AC; aci++)
6060 		tmp[aci] = chp.cap_wmeParams[aci];
6061 	IEEE80211_UNLOCK(ic);
6062 
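	/*
	 * Apply the snapshot under the driver lock; taking a snapshot
	 * first avoids holding the net80211 lock and the driver lock
	 * at the same time.
	 */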
6063 	IWM_LOCK(sc);
6064 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6065 		const struct wmeParams *ac = &tmp[aci];
6066 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6067 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6068 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6069 		ivp->queue_params[aci].edca_txop =
6070 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6071 	}
6072 	ivp->have_wme = TRUE;
6073 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6074 		in = IWM_NODE(vap->iv_bss);
6075 		if (in->in_assoc) {
6076 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6077 				device_printf(sc->sc_dev,
6078 				    "%s: failed to update MAC\n", __func__);
6079 			}
6080 		}
6081 	}
6082 	IWM_UNLOCK(sc);
6083 
6084 	return (0);
6085 #undef IWM_EXP2
6086 }
6087 
6088 static void
6089 iwm_preinit(void *arg)
6090 {
6091 	struct iwm_softc *sc = arg;
6092 	device_t dev = sc->sc_dev;
6093 	struct ieee80211com *ic = &sc->sc_ic;
6094 	int error;
6095 
6096 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6097 	    "->%s\n", __func__);
6098 
6099 	IWM_LOCK(sc);
6100 	if ((error = iwm_start_hw(sc)) != 0) {
6101 		device_printf(dev, "could not initialize hardware\n");
6102 		IWM_UNLOCK(sc);
6103 		goto fail;
6104 	}
6105 
6106 	error = iwm_run_init_mvm_ucode(sc, 1);
6107 	iwm_stop_device(sc);
6108 	if (error) {
6109 		IWM_UNLOCK(sc);
6110 		goto fail;
6111 	}
6112 	device_printf(dev,
6113 	    "hw rev 0x%x, fw ver %s, address %s\n",
6114 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6115 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6116 
6117 	/* not all hardware can do 5GHz band */
6118 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6119 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6120 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6121 	IWM_UNLOCK(sc);
6122 
6123 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6124 	    ic->ic_channels);
6125 
6126 	/*
6127 	 * At this point we've committed - if we fail to do setup,
6128 	 * we now also have to tear down the net80211 state.
6129 	 */
6130 	ieee80211_ifattach(ic);
6131 	ic->ic_vap_create = iwm_vap_create;
6132 	ic->ic_vap_delete = iwm_vap_delete;
6133 	ic->ic_raw_xmit = iwm_raw_xmit;
6134 	ic->ic_node_alloc = iwm_node_alloc;
6135 	ic->ic_scan_start = iwm_scan_start;
6136 	ic->ic_scan_end = iwm_scan_end;
6137 	ic->ic_update_mcast = iwm_update_mcast;
6138 	ic->ic_getradiocaps = iwm_init_channel_map;
6139 	ic->ic_set_channel = iwm_set_channel;
6140 	ic->ic_scan_curchan = iwm_scan_curchan;
6141 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6142 	ic->ic_wme.wme_update = iwm_wme_update;
6143 	ic->ic_parent = iwm_parent;
6144 	ic->ic_transmit = iwm_transmit;
6145 	iwm_radiotap_attach(sc);
6146 	if (bootverbose)
6147 		ieee80211_announce(ic);
6148 
6149 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6150 	    "<-%s\n", __func__);
6151 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6152 
6153 	return;
6154 fail:
6155 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6156 	iwm_detach_local(sc, 0);
6157 }
6158 
6159 /*
6160  * Attach the interface to 802.11 radiotap.
6161  */
6162 static void
6163 iwm_radiotap_attach(struct iwm_softc *sc)
6164 {
6165 	struct ieee80211com *ic = &sc->sc_ic;
6166 
6167 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6168 	    "->%s begin\n", __func__);
6169 	ieee80211_radiotap_attach(ic,
6170 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6171 	    IWM_TX_RADIOTAP_PRESENT,
6172 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6173 	    IWM_RX_RADIOTAP_PRESENT);
6174 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6175 	    "->%s end\n", __func__);
6176 }
6177 
6178 static struct ieee80211vap *
6179 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6180     enum ieee80211_opmode opmode, int flags,
6181     const uint8_t bssid[IEEE80211_ADDR_LEN],
6182     const uint8_t mac[IEEE80211_ADDR_LEN])
6183 {
6184 	struct iwm_vap *ivp;
6185 	struct ieee80211vap *vap;
6186 
6187 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6188 		return NULL;
6189 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6190 	vap = &ivp->iv_vap;
6191 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6192 	vap->iv_bmissthreshold = 10;            /* override default */
6193 	/* Override with driver methods. */
6194 	ivp->iv_newstate = vap->iv_newstate;
6195 	vap->iv_newstate = iwm_newstate;
6196 
6197 	ivp->id = IWM_DEFAULT_MACID;
6198 	ivp->color = IWM_DEFAULT_COLOR;
6199 
6200 	ivp->have_wme = FALSE;
6201 	ivp->ps_disabled = FALSE;
6202 
6203 	ieee80211_ratectl_init(vap);
6204 	/* Complete setup. */
6205 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6206 	    mac);
6207 	ic->ic_opmode = opmode;
6208 
6209 	return vap;
6210 }
6211 
6212 static void
6213 iwm_vap_delete(struct ieee80211vap *vap)
6214 {
6215 	struct iwm_vap *ivp = IWM_VAP(vap);
6216 
6217 	ieee80211_ratectl_deinit(vap);
6218 	ieee80211_vap_detach(vap);
6219 	free(ivp, M_80211_VAP);
6220 }
6221 
6222 static void
6223 iwm_xmit_queue_drain(struct iwm_softc *sc)
6224 {
6225 	struct mbuf *m;
6226 	struct ieee80211_node *ni;
6227 
6228 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6229 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6230 		ieee80211_free_node(ni);
6231 		m_freem(m);
6232 	}
6233 }
6234 
6235 static void
6236 iwm_scan_start(struct ieee80211com *ic)
6237 {
6238 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6239 	struct iwm_softc *sc = ic->ic_softc;
6240 	int error;
6241 
6242 	IWM_LOCK(sc);
6243 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6244 		/* This should not be possible */
6245 		device_printf(sc->sc_dev,
6246 		    "%s: Previous scan not completed yet\n", __func__);
6247 	}
6248 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6249 		error = iwm_mvm_umac_scan(sc);
6250 	else
6251 		error = iwm_mvm_lmac_scan(sc);
6252 	if (error != 0) {
6253 		device_printf(sc->sc_dev, "could not initiate scan\n");
6254 		IWM_UNLOCK(sc);
6255 		ieee80211_cancel_scan(vap);
6256 	} else {
6257 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6258 		iwm_led_blink_start(sc);
6259 		IWM_UNLOCK(sc);
6260 	}
6261 }
6262 
6263 static void
6264 iwm_scan_end(struct ieee80211com *ic)
6265 {
6266 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6267 	struct iwm_softc *sc = ic->ic_softc;
6268 
6269 	IWM_LOCK(sc);
6270 	iwm_led_blink_stop(sc);
6271 	if (vap->iv_state == IEEE80211_S_RUN)
6272 		iwm_mvm_led_enable(sc);
6273 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6274 		/*
6275 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6276 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6277 		 * taskqueue.
6278 		 */
6279 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6280 		iwm_mvm_scan_stop_wait(sc);
6281 	}
6282 	IWM_UNLOCK(sc);
6283 
6284 	/*
6285 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6286 	 * This is to make sure that it won't call ieee80211_scan_done
6287 	 * when we have already started the next scan.
6288 	 */
6289 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6290 }
6291 
6292 static void
6293 iwm_update_mcast(struct ieee80211com *ic)
6294 {
6295 }
6296 
6297 static void
6298 iwm_set_channel(struct ieee80211com *ic)
6299 {
6300 }
6301 
6302 static void
6303 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6304 {
6305 }
6306 
6307 static void
6308 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6309 {
6311 }
6312 
6313 void
6314 iwm_init_task(void *arg1)
6315 {
6316 	struct iwm_softc *sc = arg1;
6317 
6318 	IWM_LOCK(sc);
6319 	while (sc->sc_flags & IWM_FLAG_BUSY)
6320 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6321 	sc->sc_flags |= IWM_FLAG_BUSY;
6322 	iwm_stop(sc);
6323 	if (sc->sc_ic.ic_nrunning > 0)
6324 		iwm_init(sc);
6325 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6326 	wakeup(&sc->sc_flags);
6327 	IWM_UNLOCK(sc);
6328 }
6329 
6330 static int
6331 iwm_resume(device_t dev)
6332 {
6333 	struct iwm_softc *sc = device_get_softc(dev);
6334 	int do_reinit = 0;
6335 
6336 	/*
6337 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6338 	 * PCI Tx retries from interfering with C3 CPU state.
6339 	 */
6340 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6341 	iwm_init_task(sc);
6342 
6343 	IWM_LOCK(sc);
6344 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6345 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6346 		do_reinit = 1;
6347 	}
6348 	IWM_UNLOCK(sc);
6349 
6350 	if (do_reinit)
6351 		ieee80211_resume_all(&sc->sc_ic);
6352 
6353 	return 0;
6354 }
6355 
6356 static int
6357 iwm_suspend(device_t dev)
6358 {
6359 	int do_stop = 0;
6360 	struct iwm_softc *sc = device_get_softc(dev);
6361 
6362 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6363 
6364 	ieee80211_suspend_all(&sc->sc_ic);
6365 
6366 	if (do_stop) {
6367 		IWM_LOCK(sc);
6368 		iwm_stop(sc);
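		/*
		 * IWM_FLAG_SCANNING is (ab)used here as a marker so
		 * that iwm_resume() knows to kick off a reinit via
		 * ieee80211_resume_all().
		 */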
6369 		sc->sc_flags |= IWM_FLAG_SCANNING;
6370 		IWM_UNLOCK(sc);
6371 	}
6372 
6373 	return (0);
6374 }
6375 
6376 static int
6377 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6378 {
6379 	struct iwm_fw_info *fw = &sc->sc_fw;
6380 	device_t dev = sc->sc_dev;
6381 	int i;
6382 
6383 	if (!sc->sc_attached)
6384 		return 0;
6385 	sc->sc_attached = 0;
6386 
6387 	if (do_net80211)
6388 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6389 
6390 	callout_drain(&sc->sc_led_blink_to);
6391 	callout_drain(&sc->sc_watchdog_to);
6392 	iwm_stop_device(sc);
6393 	if (do_net80211) {
6394 		IWM_LOCK(sc);
6395 		iwm_xmit_queue_drain(sc);
6396 		IWM_UNLOCK(sc);
6397 		ieee80211_ifdetach(&sc->sc_ic);
6398 	}
6399 
6400 	iwm_phy_db_free(sc->sc_phy_db);
6401 	sc->sc_phy_db = NULL;
6402 
6403 	iwm_free_nvm_data(sc->nvm_data);
6404 
6405 	/* Free descriptor rings */
6406 	iwm_free_rx_ring(sc, &sc->rxq);
6407 	for (i = 0; i < nitems(sc->txq); i++)
6408 		iwm_free_tx_ring(sc, &sc->txq[i]);
6409 
6410 	/* Free firmware */
6411 	if (fw->fw_fp != NULL)
6412 		iwm_fw_info_free(fw);
6413 
6414 	/* Free scheduler */
6415 	iwm_dma_contig_free(&sc->sched_dma);
6416 	iwm_dma_contig_free(&sc->ict_dma);
6417 	iwm_dma_contig_free(&sc->kw_dma);
6418 	iwm_dma_contig_free(&sc->fw_dma);
6419 
6420 	iwm_free_fw_paging(sc);
6421 
6422 	/* Finished with the hardware - detach things */
6423 	iwm_pci_detach(dev);
6424 
6425 	if (sc->sc_notif_wait != NULL) {
6426 		iwm_notification_wait_free(sc->sc_notif_wait);
6427 		sc->sc_notif_wait = NULL;
6428 	}
6429 
6430 	IWM_LOCK_DESTROY(sc);
6431 
6432 	return (0);
6433 }
6434 
6435 static int
6436 iwm_detach(device_t dev)
6437 {
6438 	struct iwm_softc *sc = device_get_softc(dev);
6439 
6440 	return (iwm_detach_local(sc, 1));
6441 }
6442 
6443 static device_method_t iwm_pci_methods[] = {
6444         /* Device interface */
6445         DEVMETHOD(device_probe,         iwm_probe),
6446         DEVMETHOD(device_attach,        iwm_attach),
6447         DEVMETHOD(device_detach,        iwm_detach),
6448         DEVMETHOD(device_suspend,       iwm_suspend),
6449         DEVMETHOD(device_resume,        iwm_resume),
6450 
6451         DEVMETHOD_END
6452 };
6453 
6454 static driver_t iwm_pci_driver = {
6455         "iwm",
6456         iwm_pci_methods,
6457         sizeof (struct iwm_softc)
6458 };
6459 
6460 static devclass_t iwm_devclass;
6461 
6462 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6463 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6464 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6465 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6466