/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD: like mtod(9), but at byte offset "off" into the mbuf data. */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
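/*
 * "rate" is in units of 500 kbit/s (so 2 means 1 Mbit/s) and "plcp" is
 * the PLCP signal value the firmware expects for that rate.  CCK entries
 * come first, then OFDM, matching the IWM_RIDX_* macros below.
 */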
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

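/* Timeouts below are in ticks; hz is one second's worth of ticks. */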
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_sects *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_sects *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_sects *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_sects *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *sc,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
				    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

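	/*
	 * Each API TLV carries one 32-bit word of flags; api_index selects
	 * which word of the enabled_api bitmap it fills.
	 */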
	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_INIT)
		return 0;

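	/* Wait for any concurrent firmware load to finish before starting. */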
	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->cfg->fw_name);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */
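
	/*
	 * The image begins with a struct iwm_tlv_ucode_header (magic plus
	 * version information), followed by a stream of TLV records: a
	 * 32-bit type and a 32-bit length, then the payload padded to a
	 * 4-byte boundary.
	 */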

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size is not a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/*
 * TX scheduler byte-count tables; the scheduler is pointed at this
 * area via IWM_SCD_DRAM_BASE_ADDR in iwm_trans_pcie_fw_alive().
 */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/*
 * Interrupt cause table (ICT): a DMA table the NIC writes interrupt
 * causes into, so the interrupt handler can read them from memory
 * instead of making expensive register reads.
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned).  Each descriptor
	 * is just the 32-bit bus address of an RX buffer, hence the
	 * sizeof(uint32_t) element size.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
	 * to allocate command space for the other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

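	/*
	 * Precompute the bus address of each slot's command block and of
	 * the scratch field inside its TX command.
	 */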
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stopped/started while active (thanks, OpenBSD port!) we
 * have to track this correctly.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* deactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

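		/*
		 * Data queues are configured through the IWM_SCD_QUEUE_CFG
		 * host command instead of by poking the scheduler registers
		 * directly, as is done for the command queue above.
		 */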
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/* IWM_SCD_EN_CTRL is a bitmask of enabled queues. */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

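/*
 * Called once the firmware has reported "alive": reset the ICT table,
 * clear the scheduler context in SRAM, point the scheduler at our
 * byte-count tables, then enable the command queue and DMA channels.
 */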
1610 static int
1611 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1612 {
1613 	int error, chnl;
1614 
1615 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1616 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1617 
1618 	if (!iwm_nic_lock(sc))
1619 		return EBUSY;
1620 
1621 	iwm_ict_reset(sc);
1622 
1623 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1624 	if (scd_base_addr != 0 &&
1625 	    scd_base_addr != sc->scd_base_addr) {
1626 		device_printf(sc->sc_dev,
1627 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1628 		    __func__, sc->scd_base_addr, scd_base_addr);
1629 	}
1630 
1631 	iwm_nic_unlock(sc);
1632 
1633 	/* reset context data, TX status and translation data */
1634 	error = iwm_write_mem(sc,
1635 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1636 	    NULL, clear_dwords);
1637 	if (error)
1638 		return EBUSY;
1639 
1640 	if (!iwm_nic_lock(sc))
1641 		return EBUSY;
1642 
1643 	/* Set physical address of TX scheduler rings (1KB aligned). */
1644 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1645 
1646 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1647 
1648 	iwm_nic_unlock(sc);
1649 
1650 	/* enable command channel */
1651 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1652 	if (error)
1653 		return error;
1654 
1655 	if (!iwm_nic_lock(sc))
1656 		return EBUSY;
1657 
1658 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1659 
1660 	/* Enable DMA channels. */
1661 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1662 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1663 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1664 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1665 	}
1666 
1667 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1668 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1669 
1670 	iwm_nic_unlock(sc);
1671 
1672 	/* Enable L1-Active */
1673 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1674 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1675 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1676 	}
1677 
1678 	return error;
1679 }
1680 
1681 /*
1682  * NVM read access and content parsing.  We do not support
1683  * external NVM or writing NVM.
1684  * iwlwifi/mvm/nvm.c
1685  */
1686 
1687 /* Default NVM size to read */
1688 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1689 
1690 #define IWM_NVM_WRITE_OPCODE 1
1691 #define IWM_NVM_READ_OPCODE 0
1692 
1693 /* load nvm chunk response */
1694 enum {
1695 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1696 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1697 };
1698 
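/*
 * Read a chunk of at most 'length' bytes from NVM section 'section'
 * at offset 'offset', using the IWM_NVM_ACCESS_CMD firmware command.
 * The number of bytes actually returned by the firmware is stored
 * in *len.
 */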
1699 static int
1700 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1701 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1702 {
1703 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1704 		.offset = htole16(offset),
1705 		.length = htole16(length),
1706 		.type = htole16(section),
1707 		.op_code = IWM_NVM_READ_OPCODE,
1708 	};
1709 	struct iwm_nvm_access_resp *nvm_resp;
1710 	struct iwm_rx_packet *pkt;
1711 	struct iwm_host_cmd cmd = {
1712 		.id = IWM_NVM_ACCESS_CMD,
1713 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1714 		.data = { &nvm_access_cmd, },
1715 	};
1716 	int ret, bytes_read, offset_read;
1717 	uint8_t *resp_data;
1718 
1719 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1720 
1721 	ret = iwm_send_cmd(sc, &cmd);
1722 	if (ret) {
1723 		device_printf(sc->sc_dev,
1724 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1725 		return ret;
1726 	}
1727 
1728 	pkt = cmd.resp_pkt;
1729 
1730 	/* Extract NVM response */
1731 	nvm_resp = (void *)pkt->data;
1732 	ret = le16toh(nvm_resp->status);
1733 	bytes_read = le16toh(nvm_resp->length);
1734 	offset_read = le16toh(nvm_resp->offset);
1735 	resp_data = nvm_resp->data;
1736 	if (ret) {
1737 		if ((offset != 0) &&
1738 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver has already
			 * read valid data from another chunk, so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
				    offset);
1750 			*len = 0;
1751 			ret = 0;
1752 		} else {
1753 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1754 				    "NVM access command failed with status %d\n", ret);
1755 			ret = EIO;
1756 		}
1757 		goto exit;
1758 	}
1759 
1760 	if (offset_read != offset) {
1761 		device_printf(sc->sc_dev,
1762 		    "NVM ACCESS response with invalid offset %d\n",
1763 		    offset_read);
1764 		ret = EINVAL;
1765 		goto exit;
1766 	}
1767 
1768 	if (bytes_read > length) {
1769 		device_printf(sc->sc_dev,
1770 		    "NVM ACCESS response with too much data "
1771 		    "(%d bytes requested, %d bytes received)\n",
1772 		    length, bytes_read);
1773 		ret = EINVAL;
1774 		goto exit;
1775 	}
1776 
1777 	/* Write data to NVM */
1778 	memcpy(data + offset, resp_data, bytes_read);
1779 	*len = bytes_read;
1780 
1781  exit:
1782 	iwm_free_resp(sc, &cmd);
1783 	return ret;
1784 }
1785 
/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM.  Because EEPROM reads are not limited
 * by the uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as fits without
 * overflowing, so no check is needed.
 */
1796 static int
1797 iwm_nvm_read_section(struct iwm_softc *sc,
1798 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1799 {
1800 	uint16_t seglen, length, offset = 0;
1801 	int ret;
1802 
1803 	/* Set nvm section read length */
1804 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1805 
1806 	seglen = length;
1807 
1808 	/* Read the NVM until exhausted (reading less than requested) */
1809 	while (seglen == length) {
1810 		/* Check no memory assumptions fail and cause an overflow */
1811 		if ((size_read + offset + length) >
1812 		    sc->cfg->eeprom_size) {
1813 			device_printf(sc->sc_dev,
1814 			    "EEPROM size is too small for NVM\n");
1815 			return ENOBUFS;
1816 		}
1817 
1818 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1819 		if (ret) {
1820 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1821 				    "Cannot read NVM from section %d offset %d, length %d\n",
1822 				    section, offset, length);
1823 			return ret;
1824 		}
1825 		offset += seglen;
1826 	}
1827 
1828 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1829 		    "NVM section %d read completed\n", section);
1830 	*len = offset;
1831 	return 0;
1832 }
1833 
1834 /*
1835  * BEGIN IWM_NVM_PARSE
1836  */
1837 
1838 /* iwlwifi/iwl-nvm-parse.c */
1839 
1840 /* NVM offsets (in words) definitions */
1841 enum iwm_nvm_offsets {
1842 	/* NVM HW-Section offset (in words) definitions */
1843 	IWM_HW_ADDR = 0x15,
1844 
1845 /* NVM SW-Section offset (in words) definitions */
1846 	IWM_NVM_SW_SECTION = 0x1C0,
1847 	IWM_NVM_VERSION = 0,
1848 	IWM_RADIO_CFG = 1,
1849 	IWM_SKU = 2,
1850 	IWM_N_HW_ADDRS = 3,
1851 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1852 
1853 /* NVM calibration section offset (in words) definitions */
1854 	IWM_NVM_CALIB_SECTION = 0x2B8,
1855 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1856 };
1857 
1858 enum iwm_8000_nvm_offsets {
1859 	/* NVM HW-Section offset (in words) definitions */
1860 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1861 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1862 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1863 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1864 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1865 
1866 	/* NVM SW-Section offset (in words) definitions */
1867 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1868 	IWM_NVM_VERSION_8000 = 0,
1869 	IWM_RADIO_CFG_8000 = 0,
1870 	IWM_SKU_8000 = 2,
1871 	IWM_N_HW_ADDRS_8000 = 3,
1872 
1873 	/* NVM REGULATORY -Section offset (in words) definitions */
1874 	IWM_NVM_CHANNELS_8000 = 0,
1875 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1876 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1877 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1878 
1879 	/* NVM calibration section offset (in words) definitions */
1880 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1881 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1882 };
1883 
1884 /* SKU Capabilities (actual values from NVM definition) */
1885 enum nvm_sku_bits {
1886 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1887 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1888 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1889 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1890 };
1891 
1892 /* radio config bits (actual values from NVM definition) */
1893 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1894 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1895 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1896 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1897 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1898 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1899 
1900 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1901 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1902 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1903 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1904 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1905 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1906 
1907 #define DEFAULT_MAX_TX_POWER 16
1908 
1909 /**
1910  * enum iwm_nvm_channel_flags - channel flags in NVM
1911  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1912  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1913  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1914  * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1916  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1917  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1918  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1919  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1920  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1921  */
1922 enum iwm_nvm_channel_flags {
1923 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1924 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1925 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1926 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1927 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1928 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1929 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1930 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1931 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1932 };
1933 
1934 /*
1935  * Translate EEPROM flags to net80211.
1936  */
1937 static uint32_t
1938 iwm_eeprom_channel_flags(uint16_t ch_flags)
1939 {
1940 	uint32_t nflags;
1941 
1942 	nflags = 0;
1943 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1944 		nflags |= IEEE80211_CHAN_PASSIVE;
1945 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1946 		nflags |= IEEE80211_CHAN_NOADHOC;
1947 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1948 		nflags |= IEEE80211_CHAN_DFS;
1949 		/* Just in case. */
1950 		nflags |= IEEE80211_CHAN_NOADHOC;
1951 	}
1952 
1953 	return (nflags);
1954 }
1955 
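/*
 * Add the NVM channels with table indices [ch_idx, ch_num) to the
 * net80211 channel list, skipping any entry whose flags lack
 * IWM_NVM_CHANNEL_VALID.
 */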
1956 static void
1957 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1958     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1959     const uint8_t bands[])
1960 {
1961 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1962 	uint32_t nflags;
1963 	uint16_t ch_flags;
1964 	uint8_t ieee;
1965 	int error;
1966 
1967 	for (; ch_idx < ch_num; ch_idx++) {
1968 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1969 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1970 			ieee = iwm_nvm_channels[ch_idx];
1971 		else
1972 			ieee = iwm_nvm_channels_8000[ch_idx];
1973 
1974 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1975 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1976 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1977 			    ieee, ch_flags,
1978 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1979 			    "5.2" : "2.4");
1980 			continue;
1981 		}
1982 
1983 		nflags = iwm_eeprom_channel_flags(ch_flags);
1984 		error = ieee80211_add_channel(chans, maxchans, nchans,
1985 		    ieee, 0, 0, nflags, bands);
1986 		if (error != 0)
1987 			break;
1988 
1989 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1990 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1991 		    ieee, ch_flags,
1992 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1993 		    "5.2" : "2.4");
1994 	}
1995 }
1996 
1997 static void
1998 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1999     struct ieee80211_channel chans[])
2000 {
2001 	struct iwm_softc *sc = ic->ic_softc;
2002 	struct iwm_nvm_data *data = sc->nvm_data;
2003 	uint8_t bands[IEEE80211_MODE_BYTES];
2004 	size_t ch_num;
2005 
2006 	memset(bands, 0, sizeof(bands));
2007 	/* 1-13: 11b/g channels. */
2008 	setbit(bands, IEEE80211_MODE_11B);
2009 	setbit(bands, IEEE80211_MODE_11G);
2010 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2011 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2012 
2013 	/* 14: 11b channel only. */
2014 	clrbit(bands, IEEE80211_MODE_11G);
2015 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2016 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2017 
2018 	if (data->sku_cap_band_52GHz_enable) {
2019 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2020 			ch_num = nitems(iwm_nvm_channels);
2021 		else
2022 			ch_num = nitems(iwm_nvm_channels_8000);
2023 		memset(bands, 0, sizeof(bands));
2024 		setbit(bands, IEEE80211_MODE_11A);
2025 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2026 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2027 	}
2028 }
2029 
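/*
 * Derive the MAC address on family 8000 devices: prefer the MAC
 * address override section of the NVM, and fall back to the address
 * stored in the IWM_WFMP_MAC_ADDR_{0,1} periphery registers when the
 * override is reserved, invalid or absent.
 */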
2030 static void
2031 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2032 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2033 {
2034 	const uint8_t *hw_addr;
2035 
2036 	if (mac_override) {
2037 		static const uint8_t reserved_mac[] = {
2038 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2039 		};
2040 
2041 		hw_addr = (const uint8_t *)(mac_override +
2042 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2043 
		/*
		 * Store the MAC address from the MAO (MAC address override)
		 * section.  No byte swapping is required in the MAO section.
		 */
2048 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2049 
2050 		/*
2051 		 * Force the use of the OTP MAC address in case of reserved MAC
2052 		 * address in the NVM, or if address is given but invalid.
2053 		 */
2054 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2055 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2056 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2057 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2058 			return;
2059 
2060 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2061 		    "%s: mac address from nvm override section invalid\n",
2062 		    __func__);
2063 	}
2064 
2065 	if (nvm_hw) {
2066 		/* read the mac address from WFMP registers */
2067 		uint32_t mac_addr0 =
2068 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2069 		uint32_t mac_addr1 =
2070 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2071 
2072 		hw_addr = (const uint8_t *)&mac_addr0;
2073 		data->hw_addr[0] = hw_addr[3];
2074 		data->hw_addr[1] = hw_addr[2];
2075 		data->hw_addr[2] = hw_addr[1];
2076 		data->hw_addr[3] = hw_addr[0];
2077 
2078 		hw_addr = (const uint8_t *)&mac_addr1;
2079 		data->hw_addr[4] = hw_addr[1];
2080 		data->hw_addr[5] = hw_addr[0];
2081 
2082 		return;
2083 	}
2084 
2085 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2086 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2087 }
2088 
2089 static int
2090 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2091 	    const uint16_t *phy_sku)
2092 {
2093 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2094 		return le16_to_cpup(nvm_sw + IWM_SKU);
2095 
2096 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2097 }
2098 
2099 static int
2100 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2101 {
2102 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2103 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2104 	else
2105 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2106 						IWM_NVM_VERSION_8000));
2107 }
2108 
2109 static int
2110 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2111 		  const uint16_t *phy_sku)
2112 {
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2117 }
2118 
2119 static int
2120 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2121 {
2122 	int n_hw_addr;
2123 
2124 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2125 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2126 
2127 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2128 
	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2130 }
2131 
2132 static void
2133 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2134 		  uint32_t radio_cfg)
2135 {
2136 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2137 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2138 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2139 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2140 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2141 		return;
2142 	}
2143 
2144 	/* set the radio configuration for family 8000 */
2145 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2146 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2147 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2148 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2149 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2150 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2151 }
2152 
2153 static int
2154 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2155 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2156 {
2157 #ifdef notyet /* for FAMILY 9000 */
2158 	if (cfg->mac_addr_from_csr) {
2159 		iwm_set_hw_address_from_csr(sc, data);
	} else
2161 #endif
2162 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2163 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2164 
		/*
		 * The byte order is little-endian 16-bit: the bytes of
		 * address 123456 are stored as 214365.
		 */
2166 		data->hw_addr[0] = hw_addr[1];
2167 		data->hw_addr[1] = hw_addr[0];
2168 		data->hw_addr[2] = hw_addr[3];
2169 		data->hw_addr[3] = hw_addr[2];
2170 		data->hw_addr[4] = hw_addr[5];
2171 		data->hw_addr[5] = hw_addr[4];
2172 	} else {
2173 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2174 	}
2175 
2176 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2177 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2178 		return EINVAL;
2179 	}
2180 
2181 	return 0;
2182 }
2183 
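/*
 * Allocate an iwm_nvm_data structure, with a trailing channel flags
 * array sized for the device family, and fill it from the parsed NVM
 * sections: version, radio configuration, SKU capabilities, MAC
 * address and per-channel flags.
 */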
2184 static struct iwm_nvm_data *
2185 iwm_parse_nvm_data(struct iwm_softc *sc,
2186 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2187 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2188 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2189 {
2190 	struct iwm_nvm_data *data;
2191 	uint32_t sku, radio_cfg;
2192 
2193 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2194 		data = malloc(sizeof(*data) +
2195 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2196 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2197 	} else {
2198 		data = malloc(sizeof(*data) +
2199 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2200 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2201 	}
2202 	if (!data)
2203 		return NULL;
2204 
2205 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2206 
2207 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2208 	iwm_set_radio_cfg(sc, data, radio_cfg);
2209 
2210 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2211 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2212 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2213 	data->sku_cap_11n_enable = 0;
2214 
2215 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2216 
2217 	/* If no valid mac address was found - bail out */
2218 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2219 		free(data, M_DEVBUF);
2220 		return NULL;
2221 	}
2222 
2223 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2224 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2225 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2226 	} else {
2227 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2228 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2229 	}
2230 
2231 	return data;
2232 }
2233 
2234 static void
2235 iwm_free_nvm_data(struct iwm_nvm_data *data)
2236 {
2237 	if (data != NULL)
2238 		free(data, M_DEVBUF);
2239 }
2240 
2241 static struct iwm_nvm_data *
2242 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2243 {
2244 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2245 
2246 	/* Checking for required sections */
2247 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2248 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2249 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2250 			device_printf(sc->sc_dev,
2251 			    "Can't parse empty OTP/NVM sections\n");
2252 			return NULL;
2253 		}
2254 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2255 		/* SW and REGULATORY sections are mandatory */
2256 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2257 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2258 			device_printf(sc->sc_dev,
2259 			    "Can't parse empty OTP/NVM sections\n");
2260 			return NULL;
2261 		}
2262 		/* MAC_OVERRIDE or at least HW section must exist */
2263 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2264 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2265 			device_printf(sc->sc_dev,
2266 			    "Can't parse mac_address, empty sections\n");
2267 			return NULL;
2268 		}
2269 
2270 		/* PHY_SKU section is mandatory in B0 */
2271 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2272 			device_printf(sc->sc_dev,
2273 			    "Can't parse phy_sku in B0, empty sections\n");
2274 			return NULL;
2275 		}
2276 	} else {
2277 		panic("unknown device family %d\n", sc->cfg->device_family);
2278 	}
2279 
2280 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2281 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2282 	calib = (const uint16_t *)
2283 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2284 	regulatory = (const uint16_t *)
2285 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2286 	mac_override = (const uint16_t *)
2287 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2288 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2289 
2290 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2291 	    phy_sku, regulatory);
2292 }
2293 
2294 static int
2295 iwm_nvm_init(struct iwm_softc *sc)
2296 {
2297 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2298 	int i, ret, section;
2299 	uint32_t size_read = 0;
2300 	uint8_t *nvm_buffer, *temp;
2301 	uint16_t len;
2302 
2303 	memset(nvm_sections, 0, sizeof(nvm_sections));
2304 
2305 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2306 		return EINVAL;
2307 
	/* Load NVM values from the NIC, reading them through the firmware. */
2310 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2311 
2312 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2313 	if (!nvm_buffer)
2314 		return ENOMEM;
2315 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
		/* Read each section in turn into the scratch buffer. */
2317 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2318 					   &len, size_read);
2319 		if (ret)
2320 			continue;
2321 		size_read += len;
2322 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2323 		if (!temp) {
2324 			ret = ENOMEM;
2325 			break;
2326 		}
2327 		memcpy(temp, nvm_buffer, len);
2328 
2329 		nvm_sections[section].data = temp;
2330 		nvm_sections[section].length = len;
2331 	}
2332 	if (!size_read)
2333 		device_printf(sc->sc_dev, "OTP is blank\n");
2334 	free(nvm_buffer, M_DEVBUF);
2335 
2336 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2337 	if (!sc->nvm_data)
2338 		return EINVAL;
2339 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2340 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2341 
2342 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2343 		if (nvm_sections[i].data != NULL)
2344 			free(nvm_sections[i].data, M_DEVBUF);
2345 	}
2346 
2347 	return 0;
2348 }
2349 
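/*
 * Load one firmware section into device SRAM, copying it in chunks
 * of at most IWM_FH_MEM_TB_MAX_LENGTH bytes through the fw_dma
 * bounce buffer.  Destinations within the extended address range
 * need IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE set around the transfer.
 */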
2350 static int
2351 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2352 	const struct iwm_fw_desc *section)
2353 {
2354 	struct iwm_dma_info *dma = &sc->fw_dma;
2355 	uint8_t *v_addr;
2356 	bus_addr_t p_addr;
2357 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2358 	int ret = 0;
2359 
2360 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2361 		    "%s: [%d] uCode section being loaded...\n",
2362 		    __func__, section_num);
2363 
2364 	v_addr = dma->vaddr;
2365 	p_addr = dma->paddr;
2366 
2367 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2368 		uint32_t copy_size, dst_addr;
2369 		int extended_addr = FALSE;
2370 
2371 		copy_size = MIN(chunk_sz, section->len - offset);
2372 		dst_addr = section->offset + offset;
2373 
2374 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2375 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2376 			extended_addr = TRUE;
2377 
2378 		if (extended_addr)
2379 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2380 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2381 
2382 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2383 		    copy_size);
2384 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2385 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2386 						   copy_size);
2387 
2388 		if (extended_addr)
2389 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2390 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2391 
2392 		if (ret) {
2393 			device_printf(sc->sc_dev,
2394 			    "%s: Could not load the [%d] uCode section\n",
2395 			    __func__, section_num);
2396 			break;
2397 		}
2398 	}
2399 
2400 	return ret;
2401 }
2402 
2403 /*
2404  * ucode
2405  */
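/*
 * DMA a single firmware chunk into SRAM: pause the service channel,
 * program its TFD with the chunk's bus address, byte count and SRAM
 * destination, kick the DMA, and sleep until the FH_TX interrupt
 * handler marks the chunk done (or the wait times out).
 */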
2406 static int
2407 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2408 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2409 {
2410 	int ret;
2411 
2412 	sc->sc_fw_chunk_done = 0;
2413 
2414 	if (!iwm_nic_lock(sc))
2415 		return EBUSY;
2416 
2417 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2418 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2419 
2420 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2421 	    dst_addr);
2422 
2423 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2424 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2425 
2426 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2427 	    (iwm_get_dma_hi_addr(phy_addr)
2428 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2429 
2430 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2431 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2432 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2433 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2434 
2435 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2436 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2437 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2438 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2439 
2440 	iwm_nic_unlock(sc);
2441 
	/* wait up to 1 second for this chunk to load */
2443 	ret = 0;
2444 	while (!sc->sc_fw_chunk_done) {
2445 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2446 		if (ret)
2447 			break;
2448 	}
2449 
2450 	if (ret != 0) {
2451 		device_printf(sc->sc_dev,
2452 		    "fw chunk addr 0x%x len %d failed to load\n",
2453 		    dst_addr, byte_cnt);
2454 		return ETIMEDOUT;
2455 	}
2456 
2457 	return 0;
2458 }
2459 
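/*
 * Load all sections belonging to the given CPU of a family 8000
 * image, stopping at a separator section.  After each section the
 * ucode is told about the progress via IWM_FH_UCODE_LOAD_STATUS;
 * sec_num accumulates a bitmask, shifted by 16 for CPU2.
 */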
2460 static int
2461 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2462 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2463 {
2464 	int shift_param;
2465 	int i, ret = 0, sec_num = 0x1;
2466 	uint32_t val, last_read_idx = 0;
2467 
2468 	if (cpu == 1) {
2469 		shift_param = 0;
2470 		*first_ucode_section = 0;
2471 	} else {
2472 		shift_param = 16;
2473 		(*first_ucode_section)++;
2474 	}
2475 
2476 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2477 		last_read_idx = i;
2478 
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the
		 * CPU2 non-paged sections from the CPU2 paging sections.
		 */
2485 		if (!image->fw_sect[i].data ||
2486 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2487 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2488 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2489 				    "Break since Data not valid or Empty section, sec = %d\n",
2490 				    i);
2491 			break;
2492 		}
2493 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2494 		if (ret)
2495 			return ret;
2496 
2497 		/* Notify the ucode of the loaded section number and status */
2498 		if (iwm_nic_lock(sc)) {
2499 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2500 			val = val | (sec_num << shift_param);
2501 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2502 			sec_num = (sec_num << 1) | 0x1;
2503 			iwm_nic_unlock(sc);
2504 		}
2505 	}
2506 
2507 	*first_ucode_section = last_read_idx;
2508 
2509 	iwm_enable_interrupts(sc);
2510 
2511 	if (iwm_nic_lock(sc)) {
2512 		if (cpu == 1)
2513 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2514 		else
2515 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2516 		iwm_nic_unlock(sc);
2517 	}
2518 
2519 	return 0;
2520 }
2521 
2522 static int
2523 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2524 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2525 {
2526 	int shift_param;
2527 	int i, ret = 0;
2528 	uint32_t last_read_idx = 0;
2529 
2530 	if (cpu == 1) {
2531 		shift_param = 0;
2532 		*first_ucode_section = 0;
2533 	} else {
2534 		shift_param = 16;
2535 		(*first_ucode_section)++;
2536 	}
2537 
2538 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2539 		last_read_idx = i;
2540 
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the
		 * CPU2 non-paged sections from the CPU2 paging sections.
		 */
2547 		if (!image->fw_sect[i].data ||
2548 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2549 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2550 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2551 				    "Break since Data not valid or Empty section, sec = %d\n",
2552 				     i);
2553 			break;
2554 		}
2555 
2556 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2557 		if (ret)
2558 			return ret;
2559 	}
2560 
2561 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2562 		iwm_set_bits_prph(sc,
2563 				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2564 				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2565 				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2566 				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2567 					shift_param);
2568 
2569 	*first_ucode_section = last_read_idx;
2570 
	return 0;
}
2574 
2575 static int
2576 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2577 	const struct iwm_fw_sects *image)
2578 {
2579 	int ret = 0;
2580 	int first_ucode_section;
2581 
2582 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2583 		     image->is_dual_cpus ? "Dual" : "Single");
2584 
2585 	/* load to FW the binary non secured sections of CPU1 */
2586 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2587 	if (ret)
2588 		return ret;
2589 
2590 	if (image->is_dual_cpus) {
2591 		/* set CPU2 header address */
2592 		if (iwm_nic_lock(sc)) {
2593 			iwm_write_prph(sc,
2594 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2595 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2596 			iwm_nic_unlock(sc);
2597 		}
2598 
2599 		/* load to FW the binary sections of CPU2 */
2600 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2601 						 &first_ucode_section);
2602 		if (ret)
2603 			return ret;
2604 	}
2605 
2606 	iwm_enable_interrupts(sc);
2607 
2608 	/* release CPU reset */
2609 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2610 
2611 	return 0;
2612 }
2613 
2614 int
2615 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2616 	const struct iwm_fw_sects *image)
2617 {
2618 	int ret = 0;
2619 	int first_ucode_section;
2620 
2621 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2622 		    image->is_dual_cpus ? "Dual" : "Single");
2623 
2624 	/* configure the ucode to be ready to get the secured image */
2625 	/* release CPU reset */
2626 	if (iwm_nic_lock(sc)) {
2627 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2628 		    IWM_RELEASE_CPU_RESET_BIT);
2629 		iwm_nic_unlock(sc);
2630 	}
2631 
2632 	/* load to FW the binary Secured sections of CPU1 */
2633 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2634 	    &first_ucode_section);
2635 	if (ret)
2636 		return ret;
2637 
2638 	/* load to FW the binary sections of CPU2 */
2639 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2640 	    &first_ucode_section);
2641 }
2642 
2643 /* XXX Get rid of this definition */
2644 static inline void
2645 iwm_enable_fw_load_int(struct iwm_softc *sc)
2646 {
2647 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2648 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2649 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2650 }
2651 
2652 /* XXX Add proper rfkill support code */
2653 static int
2654 iwm_start_fw(struct iwm_softc *sc,
2655 	const struct iwm_fw_sects *fw)
2656 {
2657 	int ret;
2658 
2659 	/* This may fail if AMT took ownership of the device */
2660 	if (iwm_prepare_card_hw(sc)) {
2661 		device_printf(sc->sc_dev,
2662 		    "%s: Exit HW not ready\n", __func__);
2663 		ret = EIO;
2664 		goto out;
2665 	}
2666 
2667 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2668 
2669 	iwm_disable_interrupts(sc);
2670 
2671 	/* make sure rfkill handshake bits are cleared */
2672 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2673 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2674 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2675 
2676 	/* clear (again), then enable host interrupts */
2677 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2678 
2679 	ret = iwm_nic_init(sc);
2680 	if (ret) {
2681 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2682 		goto out;
2683 	}
2684 
	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
	 * FH_TX interrupt, which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
2692 	iwm_enable_fw_load_int(sc);
2693 
2694 	/* really make sure rfkill handshake bits are cleared */
2695 	/* maybe we should write a few times more?  just to make sure */
2696 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2697 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2698 
2699 	/* Load the given image to the HW */
2700 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2701 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2702 	else
2703 		ret = iwm_pcie_load_given_ucode(sc, fw);
2704 
2705 	/* XXX re-check RF-Kill state */
2706 
2707 out:
2708 	return ret;
2709 }
2710 
2711 static int
2712 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2713 {
2714 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2715 		.valid = htole32(valid_tx_ant),
2716 	};
2717 
2718 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2719 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2720 }
2721 
2722 /* iwlwifi: mvm/fw.c */
2723 static int
2724 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2725 {
2726 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2727 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2728 
2729 	/* Set parameters */
2730 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2731 	phy_cfg_cmd.calib_control.event_trigger =
2732 	    sc->sc_default_calib[ucode_type].event_trigger;
2733 	phy_cfg_cmd.calib_control.flow_trigger =
2734 	    sc->sc_default_calib[ucode_type].flow_trigger;
2735 
2736 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2737 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2738 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2739 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2740 }
2741 
2742 static int
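/*
 * Notification-wait callback for IWM_MVM_ALIVE.  The firmware has
 * used three layouts of the alive response over time; tell them
 * apart by payload size and extract the error/log event table
 * pointers and the scheduler base address from whichever version
 * was received.
 */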
2743 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2744 {
2745 	struct iwm_mvm_alive_data *alive_data = data;
2746 	struct iwm_mvm_alive_resp_ver1 *palive1;
2747 	struct iwm_mvm_alive_resp_ver2 *palive2;
2748 	struct iwm_mvm_alive_resp *palive;
2749 
2750 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2751 		palive1 = (void *)pkt->data;
2752 
2753 		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive1->status), palive1->ver_type,
			    palive1->ver_subtype, palive1->flags);
2766 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2767 		palive2 = (void *)pkt->data;
2768 		sc->error_event_table =
2769 			le32toh(palive2->error_event_table_ptr);
2770 		sc->log_event_table =
2771 			le32toh(palive2->log_event_table_ptr);
2772 		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2773 		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);
2775 
2776 		alive_data->valid = le16toh(palive2->status) ==
2777 				    IWM_ALIVE_STATUS_OK;
2778 		if (sc->umac_error_event_table)
2779 			sc->support_umac_log = TRUE;
2780 
2781 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2782 			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2783 			    le16toh(palive2->status), palive2->ver_type,
2784 			    palive2->ver_subtype, palive2->flags);
2785 
2786 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2787 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2788 			    palive2->umac_major, palive2->umac_minor);
2789 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2790 		palive = (void *)pkt->data;
2791 
2792 		sc->error_event_table =
2793 			le32toh(palive->error_event_table_ptr);
2794 		sc->log_event_table =
2795 			le32toh(palive->log_event_table_ptr);
2796 		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2797 		sc->umac_error_event_table =
2798 			le32toh(palive->error_info_addr);
2799 
2800 		alive_data->valid = le16toh(palive->status) ==
2801 				    IWM_ALIVE_STATUS_OK;
2802 		if (sc->umac_error_event_table)
2803 			sc->support_umac_log = TRUE;
2804 
2805 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2806 			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2807 			    le16toh(palive->status), palive->ver_type,
2808 			    palive->ver_subtype, palive->flags);
2809 
2810 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2811 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2812 			    le32toh(palive->umac_major),
2813 			    le32toh(palive->umac_minor));
2814 	}
2815 
2816 	return TRUE;
2817 }
2818 
2819 static int
2820 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2821 	struct iwm_rx_packet *pkt, void *data)
2822 {
2823 	struct iwm_phy_db *phy_db = data;
2824 
2825 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2826 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2827 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2828 			    __func__, pkt->hdr.code);
2829 		}
2830 		return TRUE;
2831 	}
2832 
2833 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2834 		device_printf(sc->sc_dev,
2835 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2836 	}
2837 
2838 	return FALSE;
2839 }
2840 
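/*
 * Load the requested ucode image, start the firmware and block (with
 * the softc unlocked) until the ALIVE notification arrives or the
 * wait times out.  On success, finish the scheduler setup via
 * iwm_trans_pcie_fw_alive() and configure firmware paging if the
 * image uses it.
 */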
2841 static int
2842 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2843 	enum iwm_ucode_type ucode_type)
2844 {
2845 	struct iwm_notification_wait alive_wait;
2846 	struct iwm_mvm_alive_data alive_data;
2847 	const struct iwm_fw_sects *fw;
2848 	enum iwm_ucode_type old_type = sc->cur_ucode;
2849 	int error;
2850 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2851 
2852 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2853 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2854 			error);
2855 		return error;
2856 	}
2857 	fw = &sc->sc_fw.fw_sects[ucode_type];
2858 	sc->cur_ucode = ucode_type;
2859 	sc->ucode_loaded = FALSE;
2860 
2861 	memset(&alive_data, 0, sizeof(alive_data));
2862 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2863 				   alive_cmd, nitems(alive_cmd),
2864 				   iwm_alive_fn, &alive_data);
2865 
2866 	error = iwm_start_fw(sc, fw);
2867 	if (error) {
2868 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2869 		sc->cur_ucode = old_type;
2870 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2871 		return error;
2872 	}
2873 
2874 	/*
2875 	 * Some things may run in the background now, but we
2876 	 * just wait for the ALIVE notification here.
2877 	 */
2878 	IWM_UNLOCK(sc);
2879 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2880 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2881 	IWM_LOCK(sc);
2882 	if (error) {
2883 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2884 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2885 			if (iwm_nic_lock(sc)) {
2886 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2887 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2888 				iwm_nic_unlock(sc);
2889 			}
2890 			device_printf(sc->sc_dev,
2891 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2892 			    a, b);
2893 		}
2894 		sc->cur_ucode = old_type;
2895 		return error;
2896 	}
2897 
2898 	if (!alive_data.valid) {
2899 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2900 		    __func__);
2901 		sc->cur_ucode = old_type;
2902 		return EIO;
2903 	}
2904 
2905 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2906 
	/*
	 * Configure and operate the fw paging mechanism.  The driver
	 * configures the paging flow only once; the CPU2 paging image is
	 * included in the IWM_UCODE_INIT image.
	 */
2912 	if (fw->paging_mem_size) {
2913 		error = iwm_save_fw_paging(sc, fw);
2914 		if (error) {
2915 			device_printf(sc->sc_dev,
2916 			    "%s: failed to save the FW paging image\n",
2917 			    __func__);
2918 			return error;
2919 		}
2920 
2921 		error = iwm_send_paging_cmd(sc, fw);
2922 		if (error) {
2923 			device_printf(sc->sc_dev,
2924 			    "%s: failed to send the paging cmd\n", __func__);
2925 			iwm_free_fw_paging(sc);
2926 			return error;
2927 		}
2928 	}
2929 
2930 	if (!error)
2931 		sc->ucode_loaded = TRUE;
2932 	return error;
2933 }
2934 
2935 /*
2936  * mvm misc bits
2937  */
2938 
2939 /*
2940  * follows iwlwifi/fw.c
2941  */
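/*
 * Run the INIT firmware image.  With justnvm set, only the NVM is
 * read and the MAC address recorded; otherwise the BT coex, Smart
 * FIFO, Tx antenna and PHY configurations are sent and we wait for
 * the calibration results collected by iwm_wait_phy_db_entry().
 */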
2942 static int
2943 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2944 {
2945 	struct iwm_notification_wait calib_wait;
2946 	static const uint16_t init_complete[] = {
2947 		IWM_INIT_COMPLETE_NOTIF,
2948 		IWM_CALIB_RES_NOTIF_PHY_DB
2949 	};
2950 	int ret;
2951 
2952 	/* do not operate with rfkill switch turned on */
2953 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2954 		device_printf(sc->sc_dev,
2955 		    "radio is disabled by hardware switch\n");
2956 		return EPERM;
2957 	}
2958 
2959 	iwm_init_notification_wait(sc->sc_notif_wait,
2960 				   &calib_wait,
2961 				   init_complete,
2962 				   nitems(init_complete),
2963 				   iwm_wait_phy_db_entry,
2964 				   sc->sc_phy_db);
2965 
2966 	/* Will also start the device */
2967 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2968 	if (ret) {
2969 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2970 		    ret);
2971 		goto error;
2972 	}
2973 
2974 	if (justnvm) {
2975 		/* Read nvm */
2976 		ret = iwm_nvm_init(sc);
2977 		if (ret) {
2978 			device_printf(sc->sc_dev, "failed to read nvm\n");
2979 			goto error;
2980 		}
2981 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2982 		goto error;
2983 	}
2984 
2985 	ret = iwm_send_bt_init_conf(sc);
2986 	if (ret) {
2987 		device_printf(sc->sc_dev,
2988 		    "failed to send bt coex configuration: %d\n", ret);
2989 		goto error;
2990 	}
2991 
2992 	/* Init Smart FIFO. */
2993 	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2994 	if (ret)
2995 		goto error;
2996 
2997 	/* Send TX valid antennas before triggering calibrations */
2998 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2999 	if (ret) {
3000 		device_printf(sc->sc_dev,
3001 		    "failed to send antennas before calibration: %d\n", ret);
3002 		goto error;
3003 	}
3004 
3005 	/*
3006 	 * Send phy configurations command to init uCode
3007 	 * to start the 16.0 uCode init image internal calibrations.
3008 	 */
3009 	ret = iwm_send_phy_cfg_cmd(sc);
3010 	if (ret) {
3011 		device_printf(sc->sc_dev,
3012 		    "%s: Failed to run INIT calibrations: %d\n",
3013 		    __func__, ret);
3014 		goto error;
3015 	}
3016 
3017 	/*
3018 	 * Nothing to do but wait for the init complete notification
3019 	 * from the firmware.
3020 	 */
3021 	IWM_UNLOCK(sc);
3022 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3023 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);

	goto out;
3028 
3029 error:
3030 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3031 out:
3032 	return ret;
3033 }
3034 
3035 /*
3036  * receive side
3037  */
3038 
3039 /* (re)stock rx ring, called at init-time and at runtime */
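/*
 * The hardware Rx descriptor holds the buffer's bus address shifted
 * right by 8, so the buffer must be 256-byte aligned (hence the
 * KASSERT below).
 */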
3040 static int
3041 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3042 {
3043 	struct iwm_rx_ring *ring = &sc->rxq;
3044 	struct iwm_rx_data *data = &ring->data[idx];
3045 	struct mbuf *m;
3046 	bus_dmamap_t dmamap;
3047 	bus_dma_segment_t seg;
3048 	int nsegs, error;
3049 
3050 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3051 	if (m == NULL)
3052 		return ENOBUFS;
3053 
3054 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3055 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3056 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3057 	if (error != 0) {
3058 		device_printf(sc->sc_dev,
3059 		    "%s: can't map mbuf, error %d\n", __func__, error);
3060 		m_freem(m);
3061 		return error;
3062 	}
3063 
3064 	if (data->m != NULL)
3065 		bus_dmamap_unload(ring->data_dmat, data->map);
3066 
3067 	/* Swap ring->spare_map with data->map */
3068 	dmamap = data->map;
3069 	data->map = ring->spare_map;
3070 	ring->spare_map = dmamap;
3071 
3072 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3073 	data->m = m;
3074 
3075 	/* Update RX descriptor. */
3076 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3077 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3078 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3079 	    BUS_DMASYNC_PREWRITE);
3080 
3081 	return 0;
3082 }
3083 
3084 /* iwlwifi: mvm/rx.c */
3085 /*
3086  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3087  * values are reported by the fw as positive values - need to negate
 * to obtain their dBm.  Account for missing antennas by replacing 0
3089  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3090  */
3091 static int
3092 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3093 {
3094 	int energy_a, energy_b, energy_c, max_energy;
3095 	uint32_t val;
3096 
3097 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3098 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3099 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3100 	energy_a = energy_a ? -energy_a : -256;
3101 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3102 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3103 	energy_b = energy_b ? -energy_b : -256;
3104 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3105 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3106 	energy_c = energy_c ? -energy_c : -256;
3107 	max_energy = MAX(energy_a, energy_b);
3108 	max_energy = MAX(max_energy, energy_c);
3109 
3110 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3111 	    "energy In A %d B %d C %d , and max %d\n",
3112 	    energy_a, energy_b, energy_c, max_energy);
3113 
3114 	return max_energy;
3115 }
3116 
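/*
 * Cache the PHY info from an IWM_REPLY_RX_PHY_CMD; it describes the
 * MPDU frame(s) the firmware delivers after it and is consumed by
 * iwm_mvm_rx_rx_mpdu().
 */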
3117 static void
3118 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3119 {
3120 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3121 
3122 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3123 
3124 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3125 }
3126 
3127 /*
3128  * Retrieve the average noise (in dBm) among receivers.
3129  */
3130 static int
3131 iwm_get_noise(struct iwm_softc *sc,
3132     const struct iwm_mvm_statistics_rx_non_phy *stats)
3133 {
3134 	int i, total, nbant, noise;
3135 
3136 	total = nbant = noise = 0;
3137 	for (i = 0; i < 3; i++) {
3138 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3139 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3140 		    __func__,
3141 		    i,
3142 		    noise);
3143 
3144 		if (noise) {
3145 			total += noise;
3146 			nbant++;
3147 		}
3148 	}
3149 
3150 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3151 	    __func__, nbant, total);
3152 #if 0
3153 	/* There should be at least one antenna but check anyway. */
3154 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3155 #else
3156 	/* For now, just hard-code it to -96 to be safe */
3157 	return (-96);
3158 #endif
3159 }
3160 
3161 /*
3162  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3163  *
3164  * Handles the actual data of the Rx packet from the fw
3165  */
3166 static boolean_t
3167 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3168 	boolean_t stolen)
3169 {
3170 	struct ieee80211com *ic = &sc->sc_ic;
3171 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3172 	struct ieee80211_frame *wh;
3173 	struct ieee80211_node *ni;
3174 	struct ieee80211_rx_stats rxs;
3175 	struct iwm_rx_phy_info *phy_info;
3176 	struct iwm_rx_mpdu_res_start *rx_res;
3177 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3178 	uint32_t len;
3179 	uint32_t rx_pkt_status;
3180 	int rssi;
3181 
3182 	phy_info = &sc->sc_last_phy_info;
3183 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3184 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3185 	len = le16toh(rx_res->byte_count);
3186 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3187 
3188 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3189 		device_printf(sc->sc_dev,
3190 		    "dsp size out of range [0,20]: %d\n",
3191 		    phy_info->cfg_phy_cnt);
3192 		goto fail;
3193 	}
3194 
3195 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3196 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3197 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3198 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3199 		goto fail;
3200 	}
3201 
3202 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3203 
3204 	/* Map it to relative value */
3205 	rssi = rssi - sc->sc_noise;
3206 
3207 	/* replenish ring for the buffer we're going to feed to the sharks */
3208 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3209 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3210 		    __func__);
3211 		goto fail;
3212 	}
3213 
3214 	m->m_data = pkt->data + sizeof(*rx_res);
3215 	m->m_pkthdr.len = m->m_len = len;
3216 
3217 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3219 
3220 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3221 
3222 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3223 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3224 	    __func__,
3225 	    le16toh(phy_info->channel),
3226 	    le16toh(phy_info->phy_flags));
3227 
3228 	/*
3229 	 * Populate an RX state struct with the provided information.
3230 	 */
3231 	bzero(&rxs, sizeof(rxs));
3232 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3233 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3234 	rxs.c_ieee = le16toh(phy_info->channel);
	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3236 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3237 	} else {
3238 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3239 	}
3240 
3241 	/* rssi is in 1/2db units */
3242 	rxs.c_rssi = rssi * 2;
3243 	rxs.c_nf = sc->sc_noise;
3244 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3245 		if (ni)
3246 			ieee80211_free_node(ni);
3247 		goto fail;
3248 	}
3249 
3250 	if (ieee80211_radiotap_active_vap(vap)) {
3251 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3252 
3253 		tap->wr_flags = 0;
3254 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3255 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3256 		tap->wr_chan_freq = htole16(rxs.c_freq);
3257 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3258 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3259 		tap->wr_dbm_antsignal = (int8_t)rssi;
3260 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3261 		tap->wr_tsft = phy_info->system_timestamp;
3262 		switch (phy_info->rate) {
3263 		/* CCK rates. */
3264 		case  10: tap->wr_rate =   2; break;
3265 		case  20: tap->wr_rate =   4; break;
3266 		case  55: tap->wr_rate =  11; break;
3267 		case 110: tap->wr_rate =  22; break;
3268 		/* OFDM rates. */
3269 		case 0xd: tap->wr_rate =  12; break;
3270 		case 0xf: tap->wr_rate =  18; break;
3271 		case 0x5: tap->wr_rate =  24; break;
3272 		case 0x7: tap->wr_rate =  36; break;
3273 		case 0x9: tap->wr_rate =  48; break;
3274 		case 0xb: tap->wr_rate =  72; break;
3275 		case 0x1: tap->wr_rate =  96; break;
3276 		case 0x3: tap->wr_rate = 108; break;
3277 		/* Unknown rate: should not happen. */
3278 		default:  tap->wr_rate =   0;
3279 		}
3280 	}
3281 
3282 	IWM_UNLOCK(sc);
3283 	if (ni != NULL) {
3284 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3285 		ieee80211_input_mimo(ni, m);
3286 		ieee80211_free_node(ni);
3287 	} else {
3288 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3289 		ieee80211_input_mimo_all(ic, m);
3290 	}
3291 	IWM_LOCK(sc);
3292 
3293 	return TRUE;
3294 
3295 fail:
3296 	counter_u64_add(ic->ic_ierrors, 1);
3297 	return FALSE;
3298 }
3299 
3300 static int
3301 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3302 	struct iwm_node *in)
3303 {
3304 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3305 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3306 	struct ieee80211_node *ni = &in->in_ni;
3307 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3308 
3309 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3310 
3311 	/* Update rate control statistics. */
3312 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3313 	    __func__,
3314 	    (int) le16toh(tx_resp->status.status),
3315 	    (int) le16toh(tx_resp->status.sequence),
3316 	    tx_resp->frame_count,
3317 	    tx_resp->bt_kill_count,
3318 	    tx_resp->failure_rts,
3319 	    tx_resp->failure_frame,
3320 	    le32toh(tx_resp->initial_rate),
3321 	    (int) le16toh(tx_resp->wireless_media_time));
3322 
3323 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3324 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3325 	txs->short_retries = tx_resp->failure_rts;
3326 	txs->long_retries = tx_resp->failure_frame;
3327 	if (status != IWM_TX_STATUS_SUCCESS &&
3328 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3329 		switch (status) {
3330 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3331 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3332 			break;
3333 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3334 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3335 			break;
3336 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3337 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3338 			break;
3339 		default:
3340 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3341 			break;
3342 		}
3343 	} else {
3344 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3345 	}
3346 	ieee80211_ratectl_tx_complete(ni, txs);
3347 
3348 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3349 }
3350 
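/*
 * Handle a Tx command response: update the rate control statistics,
 * unmap and free the transmitted mbuf, and restart transmission once
 * the ring drains below IWM_TX_RING_LOMARK.
 */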
3351 static void
3352 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3353 {
3354 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3355 	int idx = cmd_hdr->idx;
3356 	int qid = cmd_hdr->qid;
3357 	struct iwm_tx_ring *ring = &sc->txq[qid];
3358 	struct iwm_tx_data *txd = &ring->data[idx];
3359 	struct iwm_node *in = txd->in;
3360 	struct mbuf *m = txd->m;
3361 	int status;
3362 
3363 	KASSERT(txd->done == 0, ("txd not done"));
3364 	KASSERT(txd->in != NULL, ("txd without node"));
3365 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3366 
3367 	sc->sc_tx_timer = 0;
3368 
3369 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3370 
3371 	/* Unmap and free mbuf. */
3372 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3373 	bus_dmamap_unload(ring->data_dmat, txd->map);
3374 
3375 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3376 	    "free txd %p, in %p\n", txd, txd->in);
3377 	txd->done = 1;
3378 	txd->m = NULL;
3379 	txd->in = NULL;
3380 
3381 	ieee80211_tx_complete(&in->in_ni, m, status);
3382 
3383 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3384 		sc->qfullmsk &= ~(1 << ring->qid);
3385 		if (sc->qfullmsk == 0) {
3386 			iwm_start(sc);
3387 		}
3388 	}
3389 }
3390 
3391 /*
3392  * transmit side
3393  */
3394 
3395 /*
3396  * Process a "command done" firmware notification.  This is where we wake
3397  * up processes waiting for a synchronous command completion.
3398  * (Adapted from if_iwn.)
3399  */
3400 static void
3401 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3402 {
3403 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3404 	struct iwm_tx_data *data;
3405 
3406 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3407 		return;	/* Not a command ack. */
3408 	}
3409 
3410 	/* XXX wide commands? */
3411 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3412 	    "cmd notification type 0x%x qid %d idx %d\n",
3413 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3414 
3415 	data = &ring->data[pkt->hdr.idx];
3416 
3417 	/* If the command was mapped in an mbuf, free it. */
3418 	if (data->m != NULL) {
3419 		bus_dmamap_sync(ring->data_dmat, data->map,
3420 		    BUS_DMASYNC_POSTWRITE);
3421 		bus_dmamap_unload(ring->data_dmat, data->map);
3422 		m_freem(data->m);
3423 		data->m = NULL;
3424 	}
3425 	wakeup(&ring->desc[pkt->hdr.idx]);
3426 
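	/*
	 * Commands complete in order, so the completing slot should be
	 * the oldest outstanding one, i.e. (cur - queued) mod ring size,
	 * which is what the check below verifies.  E.g. with cur = 5 and
	 * queued = 2, the completion should arrive for idx = 3.
	 */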
3427 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3428 		device_printf(sc->sc_dev,
3429 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3430 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3431 		/* XXX call iwm_force_nmi() */
3432 	}
3433 
3434 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3435 	ring->queued--;
3436 	if (ring->queued == 0)
3437 		iwm_pcie_clear_cmd_in_flight(sc);
3438 }
3439 
3440 #if 0
3441 /*
3442  * necessary only for block ack mode
3443  */
3444 void
3445 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3446 	uint16_t len)
3447 {
3448 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3449 	uint16_t w_val;
3450 
3451 	scd_bc_tbl = sc->sched_dma.vaddr;
3452 
3453 	len += 8; /* magic numbers came naturally from paris */
3454 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3455 		len = roundup(len, 4) / 4;
3456 
3457 	w_val = htole16(sta_id << 12 | len);
3458 
3459 	/* Update TX scheduler. */
3460 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3461 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3462 	    BUS_DMASYNC_PREWRITE);
3463 
3464 	/* I really wonder what this is ?!? */
3465 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3466 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3467 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3468 		    BUS_DMASYNC_PREWRITE);
3469 	}
3470 }
3471 #endif
3472 
3473 /*
3474  * Take an 802.11 (non-11n) rate and find the relevant rate
3475  * table entry.  Return the index into in_ridx[].
3476  *
3477  * The caller then uses that index back into in_ridx[]
3478  * to figure out the rate index programmed /into/
3479  * the firmware for this given node.
3480  */
3481 static int
3482 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3483     uint8_t rate)
3484 {
3485 	int i;
3486 	uint8_t r;
3487 
3488 	for (i = 0; i < nitems(in->in_ridx); i++) {
3489 		r = iwm_rates[in->in_ridx[i]].rate;
3490 		if (rate == r)
3491 			return (i);
3492 	}
3493 
3494 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3495 	    "%s: couldn't find an entry for rate=%d\n",
3496 	    __func__,
3497 	    rate);
3498 
3499 	/* XXX Return the first */
3500 	/* XXX TODO: have it return the /lowest/ */
3501 	return (0);
3502 }
3503 
3504 static int
3505 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3506 {
3507 	int i;
3508 
3509 	for (i = 0; i < nitems(iwm_rates); i++) {
3510 		if (iwm_rates[i].rate == rate)
3511 			return (i);
3512 	}
3513 	/* XXX error? */
3514 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3515 	    "%s: couldn't find an entry for rate=%d\n",
3516 	    __func__,
3517 	    rate);
3518 	return (0);
3519 }
3520 
3521 /*
3522  * Fill in the rate related information for a transmit command.
3523  */
3524 static const struct iwm_rate *
3525 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3526 	struct mbuf *m, struct iwm_tx_cmd *tx)
3527 {
3528 	struct ieee80211_node *ni = &in->in_ni;
3529 	struct ieee80211_frame *wh;
3530 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3531 	const struct iwm_rate *rinfo;
3532 	int type;
3533 	int ridx, rate_flags;
3534 
3535 	wh = mtod(m, struct ieee80211_frame *);
3536 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3537 
3538 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3539 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3540 
3541 	if (type == IEEE80211_FC0_TYPE_MGT ||
3542 	    type == IEEE80211_FC0_TYPE_CTL ||
3543 	    (m->m_flags & M_EAPOL) != 0) {
3544 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3545 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3546 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3547 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3548 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3549 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3550 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3551 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3552 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3553 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3554 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3555 	} else {
3556 		int i;
3557 
3558 		/* for data frames, use RS table */
3559 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3560 		/* XXX pass pktlen */
3561 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3562 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3563 		ridx = in->in_ridx[i];
3564 
3565 		/* This is the index into the programmed table */
3566 		tx->initial_rate_index = i;
3567 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3568 
3569 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3570 		    "%s: start with i=%d, txrate %d\n",
3571 		    __func__, i, iwm_rates[ridx].rate);
3572 	}
3573 
3574 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3575 	    "%s: frame type=%d txrate %d\n",
3576 	        __func__, type, iwm_rates[ridx].rate);
3577 
3578 	rinfo = &iwm_rates[ridx];
3579 
3580 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3581 	    __func__, ridx,
3582 	    rinfo->rate,
3583 	    !! (IWM_RIDX_IS_CCK(ridx))
3584 	    );
3585 
3586 	/* XXX TODO: hard-coded TX antenna? */
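	/*
	 * Encoding sketch (from the code below and the LQ comment later
	 * in this file): the PLCP value sits in the low bits, CCK frames
	 * set bit 9 (IWM_RATE_MCS_CCK_MSK), and the TX antenna is chosen
	 * by the bit at IWM_RATE_MCS_ANT_POS.  E.g. 1 Mbit/s CCK on the
	 * first antenna encodes as (1 << IWM_RATE_MCS_ANT_POS) |
	 * IWM_RATE_MCS_CCK_MSK | plcp.
	 */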
3587 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3588 	if (IWM_RIDX_IS_CCK(ridx))
3589 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3590 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3591 
3592 	return rinfo;
3593 }
3594 
3595 #define TB0_SIZE 16
3596 static int
3597 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3598 {
3599 	struct ieee80211com *ic = &sc->sc_ic;
3600 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3601 	struct iwm_node *in = IWM_NODE(ni);
3602 	struct iwm_tx_ring *ring;
3603 	struct iwm_tx_data *data;
3604 	struct iwm_tfd *desc;
3605 	struct iwm_device_cmd *cmd;
3606 	struct iwm_tx_cmd *tx;
3607 	struct ieee80211_frame *wh;
3608 	struct ieee80211_key *k = NULL;
3609 	struct mbuf *m1;
3610 	const struct iwm_rate *rinfo;
3611 	uint32_t flags;
3612 	u_int hdrlen;
3613 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3614 	int nsegs;
3615 	uint8_t tid, type;
3616 	int i, totlen, error, pad;
3617 
3618 	wh = mtod(m, struct ieee80211_frame *);
3619 	hdrlen = ieee80211_anyhdrsize(wh);
3620 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3621 	tid = 0;
3622 	ring = &sc->txq[ac];
3623 	desc = &ring->desc[ring->cur];
3624 	memset(desc, 0, sizeof(*desc));
3625 	data = &ring->data[ring->cur];
3626 
3627 	/* Fill out iwm_tx_cmd to send to the firmware */
3628 	cmd = &ring->cmd[ring->cur];
3629 	cmd->hdr.code = IWM_TX_CMD;
3630 	cmd->hdr.flags = 0;
3631 	cmd->hdr.qid = ring->qid;
3632 	cmd->hdr.idx = ring->cur;
3633 
3634 	tx = (void *)cmd->data;
3635 	memset(tx, 0, sizeof(*tx));
3636 
3637 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3638 
3639 	/* Encrypt the frame if need be. */
3640 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3641 		/* Retrieve key for TX && do software encryption. */
3642 		k = ieee80211_crypto_encap(ni, m);
3643 		if (k == NULL) {
3644 			m_freem(m);
3645 			return (ENOBUFS);
3646 		}
3647 		/* 802.11 header may have moved. */
3648 		wh = mtod(m, struct ieee80211_frame *);
3649 	}
3650 
3651 	if (ieee80211_radiotap_active_vap(vap)) {
3652 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3653 
3654 		tap->wt_flags = 0;
3655 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3656 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3657 		tap->wt_rate = rinfo->rate;
3658 		if (k != NULL)
3659 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3660 		ieee80211_radiotap_tx(vap, m);
3661 	}
3662 
3663 
3664 	totlen = m->m_pkthdr.len;
3665 
3666 	flags = 0;
3667 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3668 		flags |= IWM_TX_CMD_FLG_ACK;
3669 	}
3670 
3671 	if (type == IEEE80211_FC0_TYPE_DATA
3672 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3673 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3674 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3675 	}
3676 
3677 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3678 	    type != IEEE80211_FC0_TYPE_DATA)
3679 		tx->sta_id = sc->sc_aux_sta.sta_id;
3680 	else
3681 		tx->sta_id = IWM_STATION_ID;
3682 
3683 	if (type == IEEE80211_FC0_TYPE_MGT) {
3684 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3685 
3686 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3687 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3688 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3689 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3690 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3691 		} else {
3692 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3693 		}
3694 	} else {
3695 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3696 	}
3697 
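	/*
	 * Worked example for the padding below: a 3-address data header
	 * is 24 bytes, so no pad is needed; a QoS data header is 26
	 * bytes, so (hdrlen & 3) == 2 and pad = 4 - 2 = 2, keeping the
	 * bytes after the header 4-byte aligned.
	 */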
3698 	if (hdrlen & 3) {
3699 		/* First segment length must be a multiple of 4. */
3700 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3701 		pad = 4 - (hdrlen & 3);
3702 	} else
3703 		pad = 0;
3704 
3705 	tx->driver_txop = 0;
3706 	tx->next_frame_len = 0;
3707 
3708 	tx->len = htole16(totlen);
3709 	tx->tid_tspec = tid;
3710 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3711 
3712 	/* Set physical address of "scratch area". */
3713 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3714 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3715 
3716 	/* Copy 802.11 header in TX command. */
3717 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3718 
3719 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3720 
3721 	tx->sec_ctl = 0;
3722 	tx->tx_flags |= htole32(flags);
3723 
3724 	/* Trim 802.11 header. */
3725 	m_adj(m, hdrlen);
3726 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3727 	    segs, &nsegs, BUS_DMA_NOWAIT);
3728 	if (error != 0) {
3729 		if (error != EFBIG) {
3730 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3731 			    error);
3732 			m_freem(m);
3733 			return error;
3734 		}
3735 		/* Too many DMA segments, linearize mbuf. */
3736 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3737 		if (m1 == NULL) {
3738 			device_printf(sc->sc_dev,
3739 			    "%s: could not defrag mbuf\n", __func__);
3740 			m_freem(m);
3741 			return (ENOBUFS);
3742 		}
3743 		m = m1;
3744 
3745 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3746 		    segs, &nsegs, BUS_DMA_NOWAIT);
3747 		if (error != 0) {
3748 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3749 			    error);
3750 			m_freem(m);
3751 			return error;
3752 		}
3753 	}
3754 	data->m = m;
3755 	data->in = in;
3756 	data->done = 0;
3757 
3758 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3759 	    "sending txd %p, in %p\n", data, data->in);
3760 	KASSERT(data->in != NULL, ("node is NULL"));
3761 
3762 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3763 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3764 	    ring->qid, ring->cur, totlen, nsegs,
3765 	    le32toh(tx->tx_flags),
3766 	    le32toh(tx->rate_n_flags),
3767 	    tx->initial_rate_index
3768 	    );
3769 
3770 	/* Fill TX descriptor. */
3771 	desc->num_tbs = 2 + nsegs;
3772 
3773 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3774 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3775 	    (TB0_SIZE << 4);
3776 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3777 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3778 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3779 	      + hdrlen + pad - TB0_SIZE) << 4);
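	/*
	 * TB0 maps the first TB0_SIZE (16) bytes of the command; TB1 maps
	 * the rest of the command header, the TX command and the (padded)
	 * 802.11 header copied in above.
	 */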
3780 
3781 	/* Other DMA segments are for data payload. */
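	/*
	 * Each hi_n_len packs bits 32..35 of the DMA address into its low
	 * nibble and the buffer length into the upper 12 bits, hence the
	 * << 4 shifts.  E.g. a 256-byte segment at 0x1_2345_6780 gives
	 * hi_n_len = 0x1 | (256 << 4) = 0x1001.
	 */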
3782 	for (i = 0; i < nsegs; i++) {
3783 		seg = &segs[i];
3784 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3785 		desc->tbs[i+2].hi_n_len = \
3786 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3787 		    | ((seg->ds_len) << 4);
3788 	}
3789 
3790 	bus_dmamap_sync(ring->data_dmat, data->map,
3791 	    BUS_DMASYNC_PREWRITE);
3792 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3793 	    BUS_DMASYNC_PREWRITE);
3794 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3795 	    BUS_DMASYNC_PREWRITE);
3796 
3797 #if 0
3798 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3799 #endif
3800 
3801 	/* Kick TX ring. */
3802 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3803 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3804 
3805 	/* Mark TX ring as full if we reach a certain threshold. */
3806 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3807 		sc->qfullmsk |= 1 << ring->qid;
3808 	}
3809 
3810 	return 0;
3811 }
3812 
3813 static int
3814 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3815     const struct ieee80211_bpf_params *params)
3816 {
3817 	struct ieee80211com *ic = ni->ni_ic;
3818 	struct iwm_softc *sc = ic->ic_softc;
3819 	int error = 0;
3820 
3821 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3822 	    "->%s begin\n", __func__);
3823 
3824 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3825 		m_freem(m);
3826 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3827 		    "<-%s not RUNNING\n", __func__);
3828 		return (ENETDOWN);
3829 	}
3830 
3831 	IWM_LOCK(sc);
3832 	/* XXX fix this */
3833 	if (params == NULL) {
3834 		error = iwm_tx(sc, m, ni, 0);
3835 	} else {
3836 		error = iwm_tx(sc, m, ni, 0);
3837 	}
3838 	sc->sc_tx_timer = 5;
3839 	IWM_UNLOCK(sc);
3840 
3841 	return (error);
3842 }
3843 
3844 /*
3845  * mvm/tx.c
3846  */
3847 
3848 /*
3849  * Note that there are transports that buffer frames before they reach
3850  * the firmware. This means that after flush_tx_path is called, the
3851  * queue might not be empty. The race-free way to handle this is to:
3852  * 1) set the station as draining
3853  * 2) flush the Tx path
3854  * 3) wait for the transport queues to be empty
3855  */
3856 int
3857 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3858 {
3859 	int ret;
3860 	struct iwm_tx_path_flush_cmd flush_cmd = {
3861 		.queues_ctl = htole32(tfd_msk),
3862 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3863 	};
3864 
3865 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3866 	    sizeof(flush_cmd), &flush_cmd);
3867 	if (ret)
3868 		device_printf(sc->sc_dev,
3869 		    "Flushing tx queue failed: %d\n", ret);
3870 	return ret;
3871 }
3872 
3873 /*
3874  * BEGIN mvm/quota.c
3875  */
3876 
3877 static int
3878 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3879 {
3880 	struct iwm_time_quota_cmd cmd;
3881 	int i, idx, ret, num_active_macs, quota, quota_rem;
3882 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3883 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3884 	uint16_t id;
3885 
3886 	memset(&cmd, 0, sizeof(cmd));
3887 
3888 	/* currently, PHY ID == binding ID */
3889 	if (ivp) {
3890 		id = ivp->phy_ctxt->id;
3891 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3892 		colors[id] = ivp->phy_ctxt->color;
3893 
3894 		if (1)
3895 			n_ifs[id] = 1;
3896 	}
3897 
3898 	/*
3899 	 * The FW's scheduling session consists of
3900 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3901 	 * equally between all the bindings that require quota.
3902 	 */
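	/*
	 * For example, assuming IWM_MVM_MAX_QUOTA is 128 (the value the
	 * Linux driver uses), three active bindings would get
	 * 128 / 3 = 42 fragments each, and the remainder of 2 is handed
	 * to the first binding further down.
	 */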
3903 	num_active_macs = 0;
3904 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3905 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3906 		num_active_macs += n_ifs[i];
3907 	}
3908 
3909 	quota = 0;
3910 	quota_rem = 0;
3911 	if (num_active_macs) {
3912 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3913 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3914 	}
3915 
3916 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3917 		if (colors[i] < 0)
3918 			continue;
3919 
3920 		cmd.quotas[idx].id_and_color =
3921 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3922 
3923 		if (n_ifs[i] <= 0) {
3924 			cmd.quotas[idx].quota = htole32(0);
3925 			cmd.quotas[idx].max_duration = htole32(0);
3926 		} else {
3927 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3928 			cmd.quotas[idx].max_duration = htole32(0);
3929 		}
3930 		idx++;
3931 	}
3932 
3933 	/* Give the remainder of the session to the first binding */
3934 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3935 
3936 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3937 	    sizeof(cmd), &cmd);
3938 	if (ret)
3939 		device_printf(sc->sc_dev,
3940 		    "%s: Failed to send quota: %d\n", __func__, ret);
3941 	return ret;
3942 }
3943 
3944 /*
3945  * END mvm/quota.c
3946  */
3947 
3948 /*
3949  * ieee80211 routines
3950  */
3951 
3952 /*
3953  * Change to AUTH state in 80211 state machine.  Roughly matches what
3954  * Linux does in bss_info_changed().
3955  */
3956 static int
3957 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3958 {
3959 	struct ieee80211_node *ni;
3960 	struct iwm_node *in;
3961 	struct iwm_vap *iv = IWM_VAP(vap);
3962 	uint32_t duration;
3963 	int error;
3964 
3965 	/*
3966 	 * XXX I have a feeling that the vap node is being
3967 	 * freed from underneath us. Grr.
3968 	 */
3969 	ni = ieee80211_ref_node(vap->iv_bss);
3970 	in = IWM_NODE(ni);
3971 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3972 	    "%s: called; vap=%p, bss ni=%p\n",
3973 	    __func__,
3974 	    vap,
3975 	    ni);
3976 
3977 	in->in_assoc = 0;
3978 
3979 	/*
3980 	 * Firmware bug - it'll crash if the beacon interval is less
3981 	 * than 16.  We can't avoid connecting entirely, so refuse the
3982 	 * station state change instead; this causes net80211 to abandon
3983 	 * attempts to connect to this AP, and eventually wpa_s will
3984 	 * blacklist the AP...
3985 	 */
3986 	if (ni->ni_intval < 16) {
3987 		device_printf(sc->sc_dev,
3988 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3989 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3990 		error = EINVAL;
3991 		goto out;
3992 	}
3993 
3994 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3995 	if (error != 0)
3996 		return error;
3997 
3998 	error = iwm_allow_mcast(vap, sc);
3999 	if (error) {
4000 		device_printf(sc->sc_dev,
4001 		    "%s: failed to set multicast\n", __func__);
4002 		goto out;
4003 	}
4004 
4005 	/*
4006 	 * This is where it deviates from what Linux does.
4007 	 *
4008 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4009 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4010 	 * and always does a mac_ctx_changed().
4011 	 *
4012 	 * The OpenBSD port doesn't attempt to do that - it resets things
4013 	 * at odd states and does the add here.
4014 	 *
4015 	 * So, until the state handling is fixed (i.e., we never reset
4016 	 * the NIC except for a firmware failure, which should drag
4017 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4018 	 * contexts that are required), let's do a dirty hack here.
4019 	 */
4020 	if (iv->is_uploaded) {
4021 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4022 			device_printf(sc->sc_dev,
4023 			    "%s: failed to update MAC\n", __func__);
4024 			goto out;
4025 		}
4026 	} else {
4027 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4028 			device_printf(sc->sc_dev,
4029 			    "%s: failed to add MAC\n", __func__);
4030 			goto out;
4031 		}
4032 	}
4033 
4034 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4035 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4036 		device_printf(sc->sc_dev,
4037 		    "%s: failed update phy ctxt\n", __func__);
4038 		goto out;
4039 	}
4040 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4041 
4042 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4043 		device_printf(sc->sc_dev,
4044 		    "%s: binding update cmd\n", __func__);
4045 		goto out;
4046 	}
4047 	/*
4048 	 * Authentication becomes unreliable when powersaving is left enabled
4049 	 * here. Powersaving will be activated again when association has
4050 	 * finished or is aborted.
4051 	 */
4052 	iv->ps_disabled = TRUE;
4053 	error = iwm_mvm_power_update_mac(sc);
4054 	iv->ps_disabled = FALSE;
4055 	if (error != 0) {
4056 		device_printf(sc->sc_dev,
4057 		    "%s: failed to update power management\n",
4058 		    __func__);
4059 		goto out;
4060 	}
4061 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4062 		device_printf(sc->sc_dev,
4063 		    "%s: failed to add sta\n", __func__);
4064 		goto out;
4065 	}
4066 
4067 	/*
4068 	 * Prevent the FW from wandering off channel during association
4069 	 * by "protecting" the session with a time event.
4070 	 */
4071 	/* XXX duration is in units of TU, not MS */
4072 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
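	/* (1 TU = 1024 usec, so a value meant as ms runs ~2.4% long.) */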
4073 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4074 	DELAY(100);
4075 
4076 	error = 0;
4077 out:
4078 	ieee80211_free_node(ni);
4079 	return (error);
4080 }
4081 
4082 static int
4083 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4084 {
4085 	uint32_t tfd_msk;
4086 
4087 	/*
4088 	 * Ok, so *technically* the proper set of calls for going
4089 	 * from RUN back to SCAN is:
4090 	 *
4091 	 * iwm_mvm_power_mac_disable(sc, in);
4092 	 * iwm_mvm_mac_ctxt_changed(sc, vap);
4093 	 * iwm_mvm_rm_sta(sc, in);
4094 	 * iwm_mvm_update_quotas(sc, NULL);
4095 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4096 	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4097 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4098 	 *
4099 	 * However, that freezes the device no matter which permutations
4100 	 * and modifications are attempted.  Obviously, this driver is missing
4101 	 * something since it works in the Linux driver, but figuring out what
4102 	 * is missing is a little more complicated.  Now, since we're going
4103 	 * back to nothing anyway, we'll just do a complete device reset.
4104 	 * Up yours, device!
4105 	 */
4106 	/*
4107 	 * Just using 0xf for the queues mask is fine as long as we only
4108 	 * get here from RUN state.
4109 	 */
4110 	tfd_msk = 0xf;
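	/* (0xf covers queues 0-3, i.e. the four WME AC queues.) */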
4111 	mbufq_drain(&sc->sc_snd);
4112 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4113 	/*
4114 	 * We seem to get away with just synchronously sending the
4115 	 * IWM_TXPATH_FLUSH command.
4116 	 */
4117 //	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4118 	iwm_stop_device(sc);
4119 	iwm_init_hw(sc);
4120 	if (in)
4121 		in->in_assoc = 0;
4122 	return 0;
4123 
4124 #if 0
4125 	int error;
4126 
4127 	iwm_mvm_power_mac_disable(sc, in);
4128 
4129 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4130 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4131 		return error;
4132 	}
4133 
4134 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4135 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4136 		return error;
4137 	}
4138 	error = iwm_mvm_rm_sta(sc, in);
4139 	in->in_assoc = 0;
4140 	iwm_mvm_update_quotas(sc, NULL);
4141 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4142 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4143 		return error;
4144 	}
4145 	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4146 
4147 	iwm_mvm_mac_ctxt_remove(sc, in);
4148 
4149 	return error;
4150 #endif
4151 }
4152 
4153 static struct ieee80211_node *
4154 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4155 {
4156 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4157 	    M_NOWAIT | M_ZERO);
4158 }
4159 
4160 uint8_t
4161 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4162 {
4163 	int i;
4164 	uint8_t rval;
4165 
4166 	for (i = 0; i < rs->rs_nrates; i++) {
4167 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4168 		if (rval == iwm_rates[ridx].rate)
4169 			return rs->rs_rates[i];
4170 	}
4171 
4172 	return 0;
4173 }
4174 
4175 static void
4176 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4177 {
4178 	struct ieee80211_node *ni = &in->in_ni;
4179 	struct iwm_lq_cmd *lq = &in->in_lq;
4180 	int nrates = ni->ni_rates.rs_nrates;
4181 	int i, ridx, tab = 0;
4182 //	int txant = 0;
4183 
4184 	if (nrates > nitems(lq->rs_table)) {
4185 		device_printf(sc->sc_dev,
4186 		    "%s: node supports %d rates, driver handles "
4187 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4188 		return;
4189 	}
4190 	if (nrates == 0) {
4191 		device_printf(sc->sc_dev,
4192 		    "%s: node supports 0 rates, odd!\n", __func__);
4193 		return;
4194 	}
4195 
4196 	/*
4197 	 * XXX .. and most of iwm_node is not initialised explicitly;
4198 	 * it's all just 0x0 passed to the firmware.
4199 	 */
4200 
4201 	/* first figure out which rates we should support */
4202 	/* XXX TODO: this isn't 11n aware /at all/ */
4203 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4204 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4205 	    "%s: nrates=%d\n", __func__, nrates);
4206 
4207 	/*
4208 	 * Loop over nrates and populate in_ridx from the highest
4209 	 * rate to the lowest rate.  Remember, in_ridx[] has
4210 	 * IEEE80211_RATE_MAXSIZE entries!
4211 	 */
4212 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4213 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
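		/* (rs_rates[] entries are in 500 kbit/s units.) */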
4214 
4215 		/* Map 802.11 rate to HW rate index. */
4216 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4217 			if (iwm_rates[ridx].rate == rate)
4218 				break;
4219 		if (ridx > IWM_RIDX_MAX) {
4220 			device_printf(sc->sc_dev,
4221 			    "%s: WARNING: device rate for %d not found!\n",
4222 			    __func__, rate);
4223 		} else {
4224 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4225 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4226 			    __func__,
4227 			    i,
4228 			    rate,
4229 			    ridx);
4230 			in->in_ridx[i] = ridx;
4231 		}
4232 	}
4233 
4234 	/* then construct a lq_cmd based on those */
4235 	memset(lq, 0, sizeof(*lq));
4236 	lq->sta_id = IWM_STATION_ID;
4237 
4238 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4239 	if (ni->ni_flags & IEEE80211_NODE_HT)
4240 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4241 
4242 	/*
4243 	 * Are these used? (We don't do SISO or MIMO.)
4244 	 * They need to be set to non-zero, though, or we get an error.
4245 	 */
4246 	lq->single_stream_ant_msk = 1;
4247 	lq->dual_stream_ant_msk = 1;
4248 
4249 	/*
4250 	 * Build the actual rate selection table.
4251 	 * The lowest bits are the rates.  Additionally,
4252 	 * CCK needs bit 9 to be set.  The rest of the bits
4253 	 * we add to the table select the TX antenna.
4254 	 * Note that we add the rates highest-rate-first
4255 	 * (the opposite order of ni_rates).
4256 	 */
4257 	/*
4258 	 * XXX TODO: this should be looping over the min of nrates
4259 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4260 	 */
4261 	for (i = 0; i < nrates; i++) {
4262 		int nextant;
4263 
4264 #if 0
4265 		if (txant == 0)
4266 			txant = iwm_mvm_get_valid_tx_ant(sc);
4267 		nextant = 1<<(ffs(txant)-1);
4268 		txant &= ~nextant;
4269 #else
4270 		nextant = iwm_mvm_get_valid_tx_ant(sc);
4271 #endif
4272 		/*
4273 		 * Map the rate id into a rate index into
4274 		 * our hardware table containing the
4275 		 * configuration to use for this rate.
4276 		 */
4277 		ridx = in->in_ridx[i];
4278 		tab = iwm_rates[ridx].plcp;
4279 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4280 		if (IWM_RIDX_IS_CCK(ridx))
4281 			tab |= IWM_RATE_MCS_CCK_MSK;
4282 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4283 		    "station rate i=%d, rate=%d, hw=%x\n",
4284 		    i, iwm_rates[ridx].rate, tab);
4285 		lq->rs_table[i] = htole32(tab);
4286 	}
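	/*
	 * At this point, e.g. with basic rates 1/2/5.5/11 Mbit/s,
	 * rs_table[0..3] holds the encodings for 11, 5.5, 2 and 1 Mbit/s,
	 * highest rate first; the loop below repeats the lowest entry.
	 */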
4287 	/* then fill the rest with the lowest possible rate */
4288 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4289 		KASSERT(tab != 0, ("invalid tab"));
4290 		lq->rs_table[i] = htole32(tab);
4291 	}
4292 }
4293 
4294 static int
4295 iwm_media_change(struct ifnet *ifp)
4296 {
4297 	struct ieee80211vap *vap = ifp->if_softc;
4298 	struct ieee80211com *ic = vap->iv_ic;
4299 	struct iwm_softc *sc = ic->ic_softc;
4300 	int error;
4301 
4302 	error = ieee80211_media_change(ifp);
4303 	if (error != ENETRESET)
4304 		return error;
4305 
4306 	IWM_LOCK(sc);
4307 	if (ic->ic_nrunning > 0) {
4308 		iwm_stop(sc);
4309 		iwm_init(sc);
4310 	}
4311 	IWM_UNLOCK(sc);
4312 	return error;
4313 }
4314 
4315 
4316 static int
4317 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4318 {
4319 	struct iwm_vap *ivp = IWM_VAP(vap);
4320 	struct ieee80211com *ic = vap->iv_ic;
4321 	struct iwm_softc *sc = ic->ic_softc;
4322 	struct iwm_node *in;
4323 	int error;
4324 
4325 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4326 	    "switching state %s -> %s\n",
4327 	    ieee80211_state_name[vap->iv_state],
4328 	    ieee80211_state_name[nstate]);
4329 	IEEE80211_UNLOCK(ic);
4330 	IWM_LOCK(sc);
4331 
4332 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4333 		iwm_led_blink_stop(sc);
4334 
4335 	/* disable beacon filtering if we're hopping out of RUN */
4336 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4337 		iwm_mvm_disable_beacon_filter(sc);
4338 
4339 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4340 			in->in_assoc = 0;
4341 
4342 		if (nstate == IEEE80211_S_INIT) {
4343 			IWM_UNLOCK(sc);
4344 			IEEE80211_LOCK(ic);
4345 			error = ivp->iv_newstate(vap, nstate, arg);
4346 			IEEE80211_UNLOCK(ic);
4347 			IWM_LOCK(sc);
4348 			iwm_release(sc, NULL);
4349 			IWM_UNLOCK(sc);
4350 			IEEE80211_LOCK(ic);
4351 			return error;
4352 		}
4353 
4354 		/*
4355 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4356 		 * above then the card will be completely reinitialized,
4357 		 * so the driver must do everything necessary to bring the card
4358 		 * from INIT to SCAN.
4359 		 *
4360 		 * Additionally, upon receiving deauth frame from AP,
4361 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4362 		 * state. This will also fail with this driver, so bring the FSM
4363 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4364 		 *
4365 		 * XXX TODO: fix this for FreeBSD!
4366 		 */
4367 		if (nstate == IEEE80211_S_SCAN ||
4368 		    nstate == IEEE80211_S_AUTH ||
4369 		    nstate == IEEE80211_S_ASSOC) {
4370 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4371 			    "Force transition to INIT; MGT=%d\n", arg);
4372 			IWM_UNLOCK(sc);
4373 			IEEE80211_LOCK(ic);
4374 			/* Always pass arg as -1 since we can't Tx right now. */
4375 			/*
4376 			 * XXX arg is just ignored anyway when transitioning
4377 			 *     to IEEE80211_S_INIT.
4378 			 */
4379 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4380 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4381 			    "Going INIT->SCAN\n");
4382 			nstate = IEEE80211_S_SCAN;
4383 			IEEE80211_UNLOCK(ic);
4384 			IWM_LOCK(sc);
4385 		}
4386 	}
4387 
4388 	switch (nstate) {
4389 	case IEEE80211_S_INIT:
4390 	case IEEE80211_S_SCAN:
4391 		if (vap->iv_state == IEEE80211_S_AUTH ||
4392 		    vap->iv_state == IEEE80211_S_ASSOC) {
4393 			int myerr;
4394 			IWM_UNLOCK(sc);
4395 			IEEE80211_LOCK(ic);
4396 			myerr = ivp->iv_newstate(vap, nstate, arg);
4397 			IEEE80211_UNLOCK(ic);
4398 			IWM_LOCK(sc);
4399 			error = iwm_mvm_rm_sta(sc, vap, FALSE);
4400 			if (error) {
4401 				device_printf(sc->sc_dev,
4402 				    "%s: Failed to remove station: %d\n",
4403 				    __func__, error);
4404 			}
4405 			error = iwm_mvm_mac_ctxt_changed(sc, vap);
4406 			if (error) {
4407 				device_printf(sc->sc_dev,
4408 				    "%s: Failed to change mac context: %d\n",
4409 				    __func__, error);
4410 			}
4411 			error = iwm_mvm_binding_remove_vif(sc, ivp);
4412 			if (error) {
4413 				device_printf(sc->sc_dev,
4414 				    "%s: Failed to remove channel ctx: %d\n",
4415 				    __func__, error);
4416 			}
4417 			ivp->phy_ctxt = NULL;
4418 			error = iwm_mvm_power_update_mac(sc);
4419 			if (error != 0) {
4420 				device_printf(sc->sc_dev,
4421 				    "%s: failed to update power management\n",
4422 				    __func__);
4423 			}
4424 			IWM_UNLOCK(sc);
4425 			IEEE80211_LOCK(ic);
4426 			return myerr;
4427 		}
4428 		break;
4429 
4430 	case IEEE80211_S_AUTH:
4431 		if ((error = iwm_auth(vap, sc)) != 0) {
4432 			device_printf(sc->sc_dev,
4433 			    "%s: could not move to auth state: %d\n",
4434 			    __func__, error);
4435 		}
4436 		break;
4437 
4438 	case IEEE80211_S_ASSOC:
4439 		/*
4440 		 * EBS may be disabled due to previous failures reported by FW.
4441 		 * Reset EBS status here assuming environment has been changed.
4442 		 */
4443 		sc->last_ebs_successful = TRUE;
4444 		break;
4445 
4446 	case IEEE80211_S_RUN:
4447 	{
4448 		struct iwm_host_cmd cmd = {
4449 			.id = IWM_LQ_CMD,
4450 			.len = { sizeof(in->in_lq), },
4451 			.flags = IWM_CMD_SYNC,
4452 		};
4453 
4454 		in = IWM_NODE(vap->iv_bss);
4455 		/* Update the association state, now we have it all */
4456 		/* (e.g., the associd comes in at this point) */
4457 		error = iwm_mvm_update_sta(sc, in);
4458 		if (error != 0) {
4459 			device_printf(sc->sc_dev,
4460 			    "%s: failed to update STA\n", __func__);
4461 			IWM_UNLOCK(sc);
4462 			IEEE80211_LOCK(ic);
4463 			return error;
4464 		}
4465 		in->in_assoc = 1;
4466 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4467 		if (error != 0) {
4468 			device_printf(sc->sc_dev,
4469 			    "%s: failed to update MAC: %d\n", __func__, error);
4470 		}
4471 
4472 		iwm_mvm_enable_beacon_filter(sc, ivp);
4473 		iwm_mvm_power_update_mac(sc);
4474 		iwm_mvm_update_quotas(sc, ivp);
4475 		iwm_setrates(sc, in);
4476 
4477 		cmd.data[0] = &in->in_lq;
4478 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4479 			device_printf(sc->sc_dev,
4480 			    "%s: IWM_LQ_CMD failed\n", __func__);
4481 		}
4482 
4483 		iwm_mvm_led_enable(sc);
4484 		break;
4485 	}
4486 
4487 	default:
4488 		break;
4489 	}
4490 	IWM_UNLOCK(sc);
4491 	IEEE80211_LOCK(ic);
4492 
4493 	return (ivp->iv_newstate(vap, nstate, arg));
4494 }
4495 
4496 void
4497 iwm_endscan_cb(void *arg, int pending)
4498 {
4499 	struct iwm_softc *sc = arg;
4500 	struct ieee80211com *ic = &sc->sc_ic;
4501 
4502 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4503 	    "%s: scan ended\n",
4504 	    __func__);
4505 
4506 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4507 }
4508 
4509 /*
4510  * Aging and idle timeouts for the different possible scenarios
4511  * in default configuration.
4512  */
4513 static const uint32_t
4514 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4515 	{
4516 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4517 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4518 	},
4519 	{
4520 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4521 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4522 	},
4523 	{
4524 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4525 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4526 	},
4527 	{
4528 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4529 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4530 	},
4531 	{
4532 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4533 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4534 	},
4535 };
4536 
4537 /*
4538  * Aging and idle timeouts for the different possible scenarios
4539  * in single BSS MAC configuration.
4540  */
4541 static const uint32_t
4542 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4543 	{
4544 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4545 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4546 	},
4547 	{
4548 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4549 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4550 	},
4551 	{
4552 		htole32(IWM_SF_MCAST_AGING_TIMER),
4553 		htole32(IWM_SF_MCAST_IDLE_TIMER)
4554 	},
4555 	{
4556 		htole32(IWM_SF_BA_AGING_TIMER),
4557 		htole32(IWM_SF_BA_IDLE_TIMER)
4558 	},
4559 	{
4560 		htole32(IWM_SF_TX_RE_AGING_TIMER),
4561 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4562 	},
4563 };
4564 
4565 static void
4566 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4567     struct ieee80211_node *ni)
4568 {
4569 	int i, j, watermark;
4570 
4571 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4572 
4573 	/*
4574 	 * If we are in the association flow, check the antenna configuration
4575 	 * capabilities of the AP station and choose the watermark accordingly.
4576 	 */
4577 	if (ni) {
4578 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4579 #ifdef notyet
4580 			if (ni->ni_rxmcs[2] != 0)
4581 				watermark = IWM_SF_W_MARK_MIMO3;
4582 			else if (ni->ni_rxmcs[1] != 0)
4583 				watermark = IWM_SF_W_MARK_MIMO2;
4584 			else
4585 #endif
4586 				watermark = IWM_SF_W_MARK_SISO;
4587 		} else {
4588 			watermark = IWM_SF_W_MARK_LEGACY;
4589 		}
4590 	/* default watermark value for unassociated mode. */
4591 	} else {
4592 		watermark = IWM_SF_W_MARK_MIMO2;
4593 	}
4594 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4595 
4596 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4597 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4598 			sf_cmd->long_delay_timeouts[i][j] =
4599 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4600 		}
4601 	}
4602 
4603 	if (ni) {
4604 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4605 		       sizeof(iwm_sf_full_timeout));
4606 	} else {
4607 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4608 		       sizeof(iwm_sf_full_timeout_def));
4609 	}
4610 }
4611 
4612 static int
4613 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4614 {
4615 	struct ieee80211com *ic = &sc->sc_ic;
4616 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4617 	struct iwm_sf_cfg_cmd sf_cmd = {
4618 		.state = htole32(IWM_SF_FULL_ON),
4619 	};
4620 	int ret = 0;
4621 
4622 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4623 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4624 
4625 	switch (new_state) {
4626 	case IWM_SF_UNINIT:
4627 	case IWM_SF_INIT_OFF:
4628 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4629 		break;
4630 	case IWM_SF_FULL_ON:
4631 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4632 		break;
4633 	default:
4634 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4635 		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4636 			  new_state);
4637 		return EINVAL;
4638 	}
4639 
4640 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4641 				   sizeof(sf_cmd), &sf_cmd);
4642 	return ret;
4643 }
4644 
4645 static int
4646 iwm_send_bt_init_conf(struct iwm_softc *sc)
4647 {
4648 	struct iwm_bt_coex_cmd bt_cmd;
4649 
4650 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4651 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4652 
4653 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4654 	    &bt_cmd);
4655 }
4656 
4657 static int
4658 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4659 {
4660 	struct iwm_mcc_update_cmd mcc_cmd;
4661 	struct iwm_host_cmd hcmd = {
4662 		.id = IWM_MCC_UPDATE_CMD,
4663 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4664 		.data = { &mcc_cmd },
4665 	};
4666 	int ret;
4667 #ifdef IWM_DEBUG
4668 	struct iwm_rx_packet *pkt;
4669 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4670 	struct iwm_mcc_update_resp *mcc_resp;
4671 	int n_channels;
4672 	uint16_t mcc;
4673 #endif
4674 	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4675 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4676 
4677 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4678 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
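	/* e.g. "ZZ" packs to 0x5a5a; the world domain "00" is 0x3030 */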
4679 	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4680 	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4681 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4682 	else
4683 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4684 
4685 	if (resp_v2)
4686 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4687 	else
4688 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4689 
4690 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4691 	    "send MCC update to FW with '%c%c' src = %d\n",
4692 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4693 
4694 	ret = iwm_send_cmd(sc, &hcmd);
4695 	if (ret)
4696 		return ret;
4697 
4698 #ifdef IWM_DEBUG
4699 	pkt = hcmd.resp_pkt;
4700 
4701 	/* Extract MCC response */
4702 	if (resp_v2) {
4703 		mcc_resp = (void *)pkt->data;
4704 		mcc = mcc_resp->mcc;
4705 		n_channels =  le32toh(mcc_resp->n_channels);
4706 	} else {
4707 		mcc_resp_v1 = (void *)pkt->data;
4708 		mcc = mcc_resp_v1->mcc;
4709 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4710 	}
4711 
4712 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4713 	if (mcc == 0)
4714 		mcc = 0x3030;  /* "00" - world */
4715 
4716 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4717 	    "regulatory domain '%c%c' (%d channels available)\n",
4718 	    mcc >> 8, mcc & 0xff, n_channels);
4719 #endif
4720 	iwm_free_resp(sc, &hcmd);
4721 
4722 	return 0;
4723 }
4724 
4725 static void
4726 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4727 {
4728 	struct iwm_host_cmd cmd = {
4729 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4730 		.len = { sizeof(uint32_t), },
4731 		.data = { &backoff, },
4732 	};
4733 
4734 	if (iwm_send_cmd(sc, &cmd) != 0) {
4735 		device_printf(sc->sc_dev,
4736 		    "failed to change thermal tx backoff\n");
4737 	}
4738 }
4739 
4740 static int
4741 iwm_init_hw(struct iwm_softc *sc)
4742 {
4743 	struct ieee80211com *ic = &sc->sc_ic;
4744 	int error, i, ac;
4745 
4746 	if ((error = iwm_start_hw(sc)) != 0) {
4747 		printf("iwm_start_hw: failed %d\n", error);
4748 		return error;
4749 	}
4750 
4751 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4752 		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4753 		return error;
4754 	}
4755 
4756 	/*
4757 	 * We should stop and restart the HW, since the INIT
4758 	 * image has just been loaded.
4759 	 */
4760 	iwm_stop_device(sc);
4761 	sc->sc_ps_disabled = FALSE;
4762 	if ((error = iwm_start_hw(sc)) != 0) {
4763 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4764 		return error;
4765 	}
4766 
4767 	/* Restart, this time with the regular firmware. */
4768 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4769 	if (error) {
4770 		device_printf(sc->sc_dev, "could not load firmware\n");
4771 		goto error;
4772 	}
4773 
4774 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4775 		device_printf(sc->sc_dev, "bt init conf failed\n");
4776 		goto error;
4777 	}
4778 
4779 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4780 	if (error != 0) {
4781 		device_printf(sc->sc_dev, "antenna config failed\n");
4782 		goto error;
4783 	}
4784 
4785 	/* Send phy db control command and then phy db calibration */
4786 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4787 		goto error;
4788 
4789 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4790 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4791 		goto error;
4792 	}
4793 
4794 	/* Add auxiliary station for scanning */
4795 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4796 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4797 		goto error;
4798 	}
4799 
4800 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4801 		/*
4802 		 * The channel used here isn't relevant as it's
4803 		 * going to be overwritten in the other flows.
4804 		 * For now use the first channel we have.
4805 		 */
4806 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4807 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4808 			goto error;
4809 	}
4810 
4811 	/* Initialize tx backoffs to the minimum. */
4812 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4813 		iwm_mvm_tt_tx_backoff(sc, 0);
4814 
4815 	error = iwm_mvm_power_update_device(sc);
4816 	if (error)
4817 		goto error;
4818 
4819 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4820 		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4821 			goto error;
4822 	}
4823 
4824 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4825 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4826 			goto error;
4827 	}
4828 
4829 	/* Enable Tx queues. */
4830 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4831 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4832 		    iwm_mvm_ac_to_tx_fifo[ac]);
4833 		if (error)
4834 			goto error;
4835 	}
4836 
4837 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4838 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4839 		goto error;
4840 	}
4841 
4842 	return 0;
4843 
4844  error:
4845 	iwm_stop_device(sc);
4846 	return error;
4847 }
4848 
4849 /* Allow multicast from our BSSID. */
4850 static int
4851 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4852 {
4853 	struct ieee80211_node *ni = vap->iv_bss;
4854 	struct iwm_mcast_filter_cmd *cmd;
4855 	size_t size;
4856 	int error;
4857 
4858 	size = roundup(sizeof(*cmd), 4);
4859 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4860 	if (cmd == NULL)
4861 		return ENOMEM;
4862 	cmd->filter_own = 1;
4863 	cmd->port_id = 0;
4864 	cmd->count = 0;
4865 	cmd->pass_all = 1;
4866 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4867 
4868 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4869 	    IWM_CMD_SYNC, size, cmd);
4870 	free(cmd, M_DEVBUF);
4871 
4872 	return (error);
4873 }
4874 
4875 /*
4876  * ifnet interfaces
4877  */
4878 
4879 static void
4880 iwm_init(struct iwm_softc *sc)
4881 {
4882 	int error;
4883 
4884 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4885 		return;
4886 	}
4887 	sc->sc_generation++;
4888 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4889 
4890 	if ((error = iwm_init_hw(sc)) != 0) {
4891 		printf("iwm_init_hw failed %d\n", error);
4892 		iwm_stop(sc);
4893 		return;
4894 	}
4895 
4896 	/*
4897 	 * Ok, firmware loaded and we are jogging
4898 	 */
4899 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4900 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4901 }
4902 
4903 static int
4904 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4905 {
4906 	struct iwm_softc *sc;
4907 	int error;
4908 
4909 	sc = ic->ic_softc;
4910 
4911 	IWM_LOCK(sc);
4912 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4913 		IWM_UNLOCK(sc);
4914 		return (ENXIO);
4915 	}
4916 	error = mbufq_enqueue(&sc->sc_snd, m);
4917 	if (error) {
4918 		IWM_UNLOCK(sc);
4919 		return (error);
4920 	}
4921 	iwm_start(sc);
4922 	IWM_UNLOCK(sc);
4923 	return (0);
4924 }
4925 
4926 /*
4927  * Dequeue packets from sendq and call send.
4928  */
4929 static void
4930 iwm_start(struct iwm_softc *sc)
4931 {
4932 	struct ieee80211_node *ni;
4933 	struct mbuf *m;
4934 	int ac = 0;
4935 
4936 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4937 	while (sc->qfullmsk == 0 &&
4938 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4939 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4940 		if (iwm_tx(sc, m, ni, ac) != 0) {
4941 			if_inc_counter(ni->ni_vap->iv_ifp,
4942 			    IFCOUNTER_OERRORS, 1);
4943 			ieee80211_free_node(ni);
4944 			continue;
4945 		}
4946 		sc->sc_tx_timer = 15;
4947 	}
4948 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4949 }
4950 
4951 static void
4952 iwm_stop(struct iwm_softc *sc)
4953 {
4954 
4955 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4956 	sc->sc_flags |= IWM_FLAG_STOPPED;
4957 	sc->sc_generation++;
4958 	iwm_led_blink_stop(sc);
4959 	sc->sc_tx_timer = 0;
4960 	iwm_stop_device(sc);
4961 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4962 }
4963 
4964 static void
4965 iwm_watchdog(void *arg)
4966 {
4967 	struct iwm_softc *sc = arg;
4968 	struct ieee80211com *ic = &sc->sc_ic;
4969 
4970 	if (sc->sc_tx_timer > 0) {
4971 		if (--sc->sc_tx_timer == 0) {
4972 			device_printf(sc->sc_dev, "device timeout\n");
4973 #ifdef IWM_DEBUG
4974 			iwm_nic_error(sc);
4975 #endif
4976 			ieee80211_restart_all(ic);
4977 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4978 			return;
4979 		}
4980 	}
4981 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4982 }
4983 
4984 static void
4985 iwm_parent(struct ieee80211com *ic)
4986 {
4987 	struct iwm_softc *sc = ic->ic_softc;
4988 	int startall = 0;
4989 
4990 	IWM_LOCK(sc);
4991 	if (ic->ic_nrunning > 0) {
4992 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4993 			iwm_init(sc);
4994 			startall = 1;
4995 		}
4996 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4997 		iwm_stop(sc);
4998 	IWM_UNLOCK(sc);
4999 	if (startall)
5000 		ieee80211_start_all(ic);
5001 }
5002 
5003 /*
5004  * The interrupt side of things
5005  */
5006 
5007 /*
5008  * error dumping routines are from iwlwifi/mvm/utils.c
5009  */
5010 
5011 /*
5012  * Note: This structure is read from the device with IO accesses,
5013  * and the reading already does the endian conversion. As it is
5014  * read with uint32_t-sized accesses, any members with a different size
5015  * need to be ordered correctly though!
5016  */
5017 struct iwm_error_event_table {
5018 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5019 	uint32_t error_id;		/* type of error */
5020 	uint32_t trm_hw_status0;	/* TRM HW status */
5021 	uint32_t trm_hw_status1;	/* TRM HW status */
5022 	uint32_t blink2;		/* branch link */
5023 	uint32_t ilink1;		/* interrupt link */
5024 	uint32_t ilink2;		/* interrupt link */
5025 	uint32_t data1;		/* error-specific data */
5026 	uint32_t data2;		/* error-specific data */
5027 	uint32_t data3;		/* error-specific data */
5028 	uint32_t bcon_time;		/* beacon timer */
5029 	uint32_t tsf_low;		/* network timestamp function timer */
5030 	uint32_t tsf_hi;		/* network timestamp function timer */
5031 	uint32_t gp1;		/* GP1 timer register */
5032 	uint32_t gp2;		/* GP2 timer register */
5033 	uint32_t fw_rev_type;	/* firmware revision type */
5034 	uint32_t major;		/* uCode version major */
5035 	uint32_t minor;		/* uCode version minor */
5036 	uint32_t hw_ver;		/* HW Silicon version */
5037 	uint32_t brd_ver;		/* HW board version */
5038 	uint32_t log_pc;		/* log program counter */
5039 	uint32_t frame_ptr;		/* frame pointer */
5040 	uint32_t stack_ptr;		/* stack pointer */
5041 	uint32_t hcmd;		/* last host command header */
5042 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5043 				 * rxtx_flag */
5044 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5045 				 * host_flag */
5046 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5047 				 * enc_flag */
5048 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5049 				 * time_flag */
5050 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5051 				 * wico interrupt */
5052 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5053 	uint32_t wait_event;		/* wait event() caller address */
5054 	uint32_t l2p_control;	/* L2pControlField */
5055 	uint32_t l2p_duration;	/* L2pDurationField */
5056 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5057 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5058 	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
5059 				 * (LMPM_PMG_SEL) */
5060 	uint32_t u_timestamp;	/* indicates the date and time of the
5061 				 * compilation */
5062 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5063 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5064 
5065 /*
5066  * UMAC error struct - relevant starting from family 8000 chip.
5067  * Note: This structure is read from the device with IO accesses,
5068  * and the reading already does the endian conversion. As it is
5069  * read with u32-sized accesses, any members with a different size
5070  * need to be ordered correctly though!
5071  */
5072 struct iwm_umac_error_event_table {
5073 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5074 	uint32_t error_id;	/* type of error */
5075 	uint32_t blink1;	/* branch link */
5076 	uint32_t blink2;	/* branch link */
5077 	uint32_t ilink1;	/* interrupt link */
5078 	uint32_t ilink2;	/* interrupt link */
5079 	uint32_t data1;		/* error-specific data */
5080 	uint32_t data2;		/* error-specific data */
5081 	uint32_t data3;		/* error-specific data */
5082 	uint32_t umac_major;
5083 	uint32_t umac_minor;
5084 	uint32_t frame_pointer;	/* core register 27 */
5085 	uint32_t stack_pointer;	/* core register 28 */
5086 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5087 	uint32_t nic_isr_pref;	/* ISR status register */
5088 } __packed;
5089 
5090 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5091 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5092 
5093 #ifdef IWM_DEBUG
5094 struct {
5095 	const char *name;
5096 	uint8_t num;
5097 } advanced_lookup[] = {
5098 	{ "NMI_INTERRUPT_WDG", 0x34 },
5099 	{ "SYSASSERT", 0x35 },
5100 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5101 	{ "BAD_COMMAND", 0x38 },
5102 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5103 	{ "FATAL_ERROR", 0x3D },
5104 	{ "NMI_TRM_HW_ERR", 0x46 },
5105 	{ "NMI_INTERRUPT_TRM", 0x4C },
5106 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5107 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5108 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5109 	{ "NMI_INTERRUPT_HOST", 0x66 },
5110 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5111 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5112 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5113 	{ "ADVANCED_SYSASSERT", 0 },
5114 };
5115 
5116 static const char *
5117 iwm_desc_lookup(uint32_t num)
5118 {
5119 	int i;
5120 
5121 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5122 		if (advanced_lookup[i].num == num)
5123 			return advanced_lookup[i].name;
5124 
5125 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5126 	return advanced_lookup[i].name;
5127 }
5128 
5129 static void
5130 iwm_nic_umac_error(struct iwm_softc *sc)
5131 {
5132 	struct iwm_umac_error_event_table table;
5133 	uint32_t base;
5134 
5135 	base = sc->umac_error_event_table;
5136 
5137 	if (base < 0x800000) {
5138 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5139 		    base);
5140 		return;
5141 	}
5142 
5143 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5144 		device_printf(sc->sc_dev, "reading errlog failed\n");
5145 		return;
5146 	}
5147 
5148 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5149 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5150 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5151 		    sc->sc_flags, table.valid);
5152 	}
5153 
5154 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5155 		iwm_desc_lookup(table.error_id));
5156 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5157 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5158 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5159 	    table.ilink1);
5160 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5161 	    table.ilink2);
5162 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5163 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5164 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5165 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5166 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5167 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5168 	    table.frame_pointer);
5169 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5170 	    table.stack_pointer);
5171 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5172 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5173 	    table.nic_isr_pref);
5174 }
5175 
5176 /*
5177  * Support for dumping the error log seemed like a good idea ...
5178  * but it's mostly hex junk and the only sensible thing is the
5179  * hw/ucode revision (which we know anyway).  Since it's here,
5180  * I'll just leave it in, just in case e.g. the Intel guys want to
5181  * help us decipher some "ADVANCED_SYSASSERT" later.
5182  */
5183 static void
5184 iwm_nic_error(struct iwm_softc *sc)
5185 {
5186 	struct iwm_error_event_table table;
5187 	uint32_t base;
5188 
5189 	device_printf(sc->sc_dev, "dumping device error log\n");
5190 	base = sc->error_event_table;
5191 	if (base < 0x800000) {
5192 		device_printf(sc->sc_dev,
5193 		    "Invalid error log pointer 0x%08x\n", base);
5194 		return;
5195 	}
5196 
5197 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5198 		device_printf(sc->sc_dev, "reading errlog failed\n");
5199 		return;
5200 	}
5201 
5202 	if (!table.valid) {
5203 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5204 		return;
5205 	}
5206 
5207 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5208 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5209 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5210 		    sc->sc_flags, table.valid);
5211 	}
5212 
5213 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5214 	    iwm_desc_lookup(table.error_id));
5215 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5216 	    table.trm_hw_status0);
5217 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5218 	    table.trm_hw_status1);
5219 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5220 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5221 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5222 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5223 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5224 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5225 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5226 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5227 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5228 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5229 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5230 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5231 	    table.fw_rev_type);
5232 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5233 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5234 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5235 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5236 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5237 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5238 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5239 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5240 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5241 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5242 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5243 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5244 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5245 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5246 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5247 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5248 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5249 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5250 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5251 
5252 	if (sc->umac_error_event_table)
5253 		iwm_nic_umac_error(sc);
5254 }
5255 #endif
5256 
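/*
 * Process one RX buffer, which may contain several firmware
 * response/notification packets packed back to back; dispatch each
 * packet and hand received MPDUs up to net80211.
 */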
5257 static void
5258 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5259 {
5260 	struct ieee80211com *ic = &sc->sc_ic;
5261 	struct iwm_cmd_response *cresp;
5262 	struct mbuf *m1;
5263 	uint32_t offset = 0;
5264 	uint32_t maxoff = IWM_RBUF_SIZE;
5265 	uint32_t nextoff;
5266 	boolean_t stolen = FALSE;
5267 
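/*
 * HAVEROOM(a): is there room at offset 'a' for at least an RX status
 * word plus a command header before the end of the buffer?
 */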
5268 #define HAVEROOM(a)	\
5269     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5270 
5271 	while (HAVEROOM(offset)) {
5272 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5273 		    offset);
5274 		int qid, idx, code, len;
5275 
5276 		qid = pkt->hdr.qid;
5277 		idx = pkt->hdr.idx;
5278 
5279 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5280 
5281 		/*
		 * We occasionally receive these from the firmware for no
		 * apparent reason.  They seem harmless, so just ignore
		 * them for now.
5284 		 */
5285 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5286 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5287 			break;
5288 		}
5289 
5290 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5291 		    "rx packet qid=%d idx=%d type=%x\n",
5292 		    qid & ~0x80, pkt->hdr.idx, code);
5293 
5294 		len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5295 		len += sizeof(uint32_t); /* account for status word */
5296 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5297 
5298 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5299 
5300 		switch (code) {
5301 		case IWM_REPLY_RX_PHY_CMD:
5302 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5303 			break;
5304 
5305 		case IWM_REPLY_RX_MPDU_CMD: {
5306 			/*
5307 			 * If this is the last frame in the RX buffer, we
5308 			 * can directly feed the mbuf to the sharks here.
5309 			 */
5310 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5311 			    struct iwm_rx_packet *, nextoff);
5312 			if (!HAVEROOM(nextoff) ||
5313 			    (nextpkt->hdr.code == 0 &&
5314 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5315 			     nextpkt->hdr.idx == 0) ||
5316 			    (nextpkt->len_n_flags ==
5317 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5318 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5319 					stolen = FALSE;
5320 					/* Make sure we abort the loop */
5321 					nextoff = maxoff;
5322 				}
5323 				break;
5324 			}
5325 
5326 			/*
5327 			 * Use m_copym instead of m_split, because that
5328 			 * makes it easier to keep a valid rx buffer in
5329 			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5330 			 *
5331 			 * We need to start m_copym() at offset 0, to get the
5332 			 * M_PKTHDR flag preserved.
5333 			 */
5334 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5335 			if (m1) {
5336 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5337 					stolen = TRUE;
5338 				else
5339 					m_freem(m1);
5340 			}
5341 			break;
5342 		}
5343 
5344 		case IWM_TX_CMD:
5345 			iwm_mvm_rx_tx_cmd(sc, pkt);
5346 			break;
5347 
5348 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5349 			struct iwm_missed_beacons_notif *resp;
5350 			int missed;
5351 
5352 			/* XXX look at mac_id to determine interface ID */
5353 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5354 
5355 			resp = (void *)pkt->data;
5356 			missed = le32toh(resp->consec_missed_beacons);
5357 
5358 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5359 			    "%s: MISSED_BEACON: mac_id=%d, "
5360 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5361 			    "num_rx=%d\n",
5362 			    __func__,
5363 			    le32toh(resp->mac_id),
5364 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5365 			    le32toh(resp->consec_missed_beacons),
5366 			    le32toh(resp->num_expected_beacons),
5367 			    le32toh(resp->num_recvd_beacons));
5368 
5369 			/* Be paranoid */
5370 			if (vap == NULL)
5371 				break;
5372 
5373 			/* XXX no net80211 locking? */
5374 			if (vap->iv_state == IEEE80211_S_RUN &&
5375 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5376 				if (missed > vap->iv_bmissthreshold) {
5377 					/* XXX bad locking; turn into task */
5378 					IWM_UNLOCK(sc);
5379 					ieee80211_beacon_miss(ic);
5380 					IWM_LOCK(sc);
5381 				}
5382 			}
5383 
5384 			break;
5385 		}
5386 
5387 		case IWM_MFUART_LOAD_NOTIFICATION:
5388 			break;
5389 
5390 		case IWM_MVM_ALIVE:
5391 			break;
5392 
5393 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5394 			break;
5395 
5396 		case IWM_STATISTICS_NOTIFICATION: {
5397 			struct iwm_notif_statistics *stats;
5398 			stats = (void *)pkt->data;
5399 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5400 			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5401 			break;
5402 		}
5403 
5404 		case IWM_NVM_ACCESS_CMD:
5405 		case IWM_MCC_UPDATE_CMD:
5406 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5407 				memcpy(sc->sc_cmd_resp,
5408 				    pkt, sizeof(sc->sc_cmd_resp));
5409 			}
5410 			break;
5411 
5412 		case IWM_MCC_CHUB_UPDATE_CMD: {
5413 			struct iwm_mcc_chub_notif *notif;
5414 			notif = (void *)pkt->data;
5415 
5416 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5417 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5418 			sc->sc_fw_mcc[2] = '\0';
5419 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5420 			    "fw source %d sent CC '%s'\n",
5421 			    notif->source_id, sc->sc_fw_mcc);
5422 			break;
5423 		}
5424 
5425 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5426 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5427 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5428 			struct iwm_dts_measurement_notif_v1 *notif;
5429 
5430 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5431 				device_printf(sc->sc_dev,
5432 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5433 				break;
5434 			}
5435 			notif = (void *)pkt->data;
5436 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5437 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5438 			    notif->temp);
5439 			break;
5440 		}
5441 
5442 		case IWM_PHY_CONFIGURATION_CMD:
5443 		case IWM_TX_ANT_CONFIGURATION_CMD:
5444 		case IWM_ADD_STA:
5445 		case IWM_MAC_CONTEXT_CMD:
5446 		case IWM_REPLY_SF_CFG_CMD:
5447 		case IWM_POWER_TABLE_CMD:
5448 		case IWM_PHY_CONTEXT_CMD:
5449 		case IWM_BINDING_CONTEXT_CMD:
5450 		case IWM_TIME_EVENT_CMD:
5451 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5452 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5453 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5454 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5455 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5456 		case IWM_REPLY_BEACON_FILTERING_CMD:
5457 		case IWM_MAC_PM_POWER_TABLE:
5458 		case IWM_TIME_QUOTA_CMD:
5459 		case IWM_REMOVE_STA:
5460 		case IWM_TXPATH_FLUSH:
5461 		case IWM_LQ_CMD:
5462 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5463 				 IWM_FW_PAGING_BLOCK_CMD):
5464 		case IWM_BT_CONFIG:
5465 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5466 			cresp = (void *)pkt->data;
5467 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5468 				memcpy(sc->sc_cmd_resp,
5469 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5470 			}
5471 			break;
5472 
5473 		/* ignore */
5474 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5475 			break;
5476 
5477 		case IWM_INIT_COMPLETE_NOTIF:
5478 			break;
5479 
5480 		case IWM_SCAN_OFFLOAD_COMPLETE:
5481 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5482 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5483 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5484 				ieee80211_runtask(ic, &sc->sc_es_task);
5485 			}
5486 			break;
5487 
5488 		case IWM_SCAN_ITERATION_COMPLETE: {
5489 			struct iwm_lmac_scan_complete_notif *notif;
5490 			notif = (void *)pkt->data;
5491 			break;
5492 		}
5493 
5494 		case IWM_SCAN_COMPLETE_UMAC:
5495 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5496 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5497 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5498 				ieee80211_runtask(ic, &sc->sc_es_task);
5499 			}
5500 			break;
5501 
5502 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5503 			struct iwm_umac_scan_iter_complete_notif *notif;
5504 			notif = (void *)pkt->data;
5505 
5506 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5507 			    "complete, status=0x%x, %d channels scanned\n",
5508 			    notif->status, notif->scanned_channels);
5509 			break;
5510 		}
5511 
5512 		case IWM_REPLY_ERROR: {
5513 			struct iwm_error_resp *resp;
5514 			resp = (void *)pkt->data;
5515 
5516 			device_printf(sc->sc_dev,
5517 			    "firmware error 0x%x, cmd 0x%x\n",
5518 			    le32toh(resp->error_type),
5519 			    resp->cmd_id);
5520 			break;
5521 		}
5522 
5523 		case IWM_TIME_EVENT_NOTIFICATION: {
5524 			struct iwm_time_event_notif *notif;
5525 			notif = (void *)pkt->data;
5526 
5527 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5528 			    "TE notif status = 0x%x action = 0x%x\n",
5529 			    notif->status, notif->action);
5530 			break;
5531 		}
5532 
5533 		case IWM_MCAST_FILTER_CMD:
5534 			break;
5535 
5536 		case IWM_SCD_QUEUE_CFG: {
5537 			struct iwm_scd_txq_cfg_rsp *rsp;
5538 			rsp = (void *)pkt->data;
5539 
5540 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5541 			    "queue cfg token=0x%x sta_id=%d "
5542 			    "tid=%d scd_queue=%d\n",
5543 			    rsp->token, rsp->sta_id, rsp->tid,
5544 			    rsp->scd_queue);
5545 			break;
5546 		}
5547 
5548 		default:
5549 			device_printf(sc->sc_dev,
5550 			    "frame %d/%d %x UNHANDLED (this should "
5551 			    "not happen)\n", qid & ~0x80, idx,
5552 			    pkt->len_n_flags);
5553 			break;
5554 		}
5555 
5556 		/*
5557 		 * Why test bit 0x80?  The Linux driver:
5558 		 *
5559 		 * There is one exception:  uCode sets bit 15 when it
5560 		 * originates the response/notification, i.e. when the
5561 		 * response/notification is not a direct response to a
5562 		 * command sent by the driver.  For example, uCode issues
5563 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5564 		 * it is not a direct response to any driver command.
5565 		 *
5566 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5567 		 * uses a slightly different format for pkt->hdr, and "qid"
5568 		 * is actually the upper byte of a two-byte field.
5569 		 */
5570 		if (!(qid & (1 << 7)))
5571 			iwm_cmd_done(sc, pkt);
5572 
5573 		offset = nextoff;
5574 	}
5575 	if (stolen)
5576 		m_freem(m);
5577 #undef HAVEROOM
5578 }
5579 
5580 /*
5581  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5582  * Basic structure from if_iwn
5583  */
5584 static void
5585 iwm_notif_intr(struct iwm_softc *sc)
5586 {
5587 	uint16_t hw;
5588 
5589 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5590 	    BUS_DMASYNC_POSTREAD);
5591 
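	/*
	 * closed_rb_num is the index of the receive buffer the firmware
	 * closed most recently; only its low 12 bits are significant,
	 * hence the 0xfff mask.
	 */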
5592 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5593 
5594 	/*
5595 	 * Process responses
5596 	 */
5597 	while (sc->rxq.cur != hw) {
5598 		struct iwm_rx_ring *ring = &sc->rxq;
5599 		struct iwm_rx_data *data = &ring->data[ring->cur];
5600 
5601 		bus_dmamap_sync(ring->data_dmat, data->map,
5602 		    BUS_DMASYNC_POSTREAD);
5603 
5604 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5605 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5606 		iwm_handle_rxb(sc, data->m);
5607 
5608 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5609 	}
5610 
5611 	/*
5612 	 * Tell the firmware that it can reuse the ring entries that
5613 	 * we have just processed.
5614 	 * Seems like the hardware gets upset unless we align
5615 	 * the write by 8??
5616 	 */
5617 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5618 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5619 }
5620 
5621 static void
5622 iwm_intr(void *arg)
5623 {
5624 	struct iwm_softc *sc = arg;
5625 	int handled = 0;
5626 	int r1, r2, rv = 0;
5627 	int isperiodic = 0;
5628 
5629 	IWM_LOCK(sc);
5630 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5631 
5632 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5633 		uint32_t *ict = sc->ict_dma.vaddr;
5634 		int tmp;
5635 
5636 		tmp = htole32(ict[sc->ict_cur]);
5637 		if (!tmp)
5638 			goto out_ena;
5639 
5640 		/*
		 * OK, there was something.  Keep plowing until we have it all.
5642 		 */
5643 		r1 = r2 = 0;
5644 		while (tmp) {
5645 			r1 |= tmp;
5646 			ict[sc->ict_cur] = 0;
5647 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5648 			tmp = htole32(ict[sc->ict_cur]);
5649 		}
5650 
5651 		/* this is where the fun begins.  don't ask */
5652 		if (r1 == 0xffffffff)
5653 			r1 = 0;
5654 
5655 		/* i am not expected to understand this */
5656 		if (r1 & 0xc0000)
5657 			r1 |= 0x8000;
5658 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
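		/*
		 * Expand the packed ICT value back into the CSR_INT bit
		 * layout; per the Linux iwlwifi driver, the 0xc0000 test
		 * above works around a hardware bug that can clear the
		 * RX bit.
		 */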
5659 	} else {
5660 		r1 = IWM_READ(sc, IWM_CSR_INT);
5661 		/* "hardware gone" (where, fishing?) */
5662 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5663 			goto out;
5664 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5665 	}
5666 	if (r1 == 0 && r2 == 0) {
5667 		goto out_ena;
5668 	}
5669 
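	/* Acknowledge the interrupts we saw, plus any bits we never enabled. */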
5670 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5671 
5672 	/* Safely ignore these bits for debug checks below */
5673 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5674 
5675 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5676 		int i;
5677 		struct ieee80211com *ic = &sc->sc_ic;
5678 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5679 
5680 #ifdef IWM_DEBUG
5681 		iwm_nic_error(sc);
5682 #endif
5683 		/* Dump driver status (TX and RX rings) while we're here. */
5684 		device_printf(sc->sc_dev, "driver status:\n");
5685 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5686 			struct iwm_tx_ring *ring = &sc->txq[i];
5687 			device_printf(sc->sc_dev,
5688 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5689 			    "queued=%-3d\n",
5690 			    i, ring->qid, ring->cur, ring->queued);
5691 		}
5692 		device_printf(sc->sc_dev,
5693 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5694 		device_printf(sc->sc_dev,
5695 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5696 
5697 		/* Don't stop the device; just do a VAP restart */
5698 		IWM_UNLOCK(sc);
5699 
5700 		if (vap == NULL) {
5701 			printf("%s: null vap\n", __func__);
5702 			return;
5703 		}
5704 
5705 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5706 		    "restarting\n", __func__, vap->iv_state);
5707 
5708 		ieee80211_restart_all(ic);
5709 		return;
5710 	}
5711 
5712 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5713 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5714 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5715 		iwm_stop(sc);
5716 		rv = 1;
5717 		goto out;
5718 	}
5719 
5720 	/* firmware chunk loaded */
5721 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5722 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5723 		handled |= IWM_CSR_INT_BIT_FH_TX;
5724 		sc->sc_fw_chunk_done = 1;
5725 		wakeup(&sc->sc_fw);
5726 	}
5727 
5728 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5729 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5730 		if (iwm_check_rfkill(sc)) {
5731 			device_printf(sc->sc_dev,
5732 			    "%s: rfkill switch, disabling interface\n",
5733 			    __func__);
5734 			iwm_stop(sc);
5735 		}
5736 	}
5737 
5738 	/*
5739 	 * The Linux driver uses periodic interrupts to avoid races.
5740 	 * We cargo-cult like it's going out of fashion.
5741 	 */
5742 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5743 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5744 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5745 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5746 			IWM_WRITE_1(sc,
5747 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5748 		isperiodic = 1;
5749 	}
5750 
5751 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5752 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5753 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5754 
5755 		iwm_notif_intr(sc);
5756 
5757 		/* enable periodic interrupt, see above */
5758 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5759 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5760 			    IWM_CSR_INT_PERIODIC_ENA);
5761 	}
5762 
5763 	if (__predict_false(r1 & ~handled))
5764 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5765 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5766 	rv = 1;
5767 
5768  out_ena:
5769 	iwm_restore_interrupts(sc);
5770  out:
5771 	IWM_UNLOCK(sc);
5772 	return;
5773 }
5774 
5775 /*
5776  * Autoconf glue-sniffing
5777  */
5778 #define	PCI_VENDOR_INTEL		0x8086
5779 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5780 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5781 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5782 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5783 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5784 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5785 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5786 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5787 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5788 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5789 
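/* Map supported PCI device IDs to their per-chip configuration. */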
5790 static const struct iwm_devices {
5791 	uint16_t		device;
5792 	const struct iwm_cfg	*cfg;
5793 } iwm_devices[] = {
5794 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5795 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5796 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5797 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5798 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5799 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5800 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5801 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5802 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5803 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5804 };
5805 
5806 static int
5807 iwm_probe(device_t dev)
5808 {
5809 	int i;
5810 
5811 	for (i = 0; i < nitems(iwm_devices); i++) {
5812 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5813 		    pci_get_device(dev) == iwm_devices[i].device) {
5814 			device_set_desc(dev, iwm_devices[i].cfg->name);
5815 			return (BUS_PROBE_DEFAULT);
5816 		}
5817 	}
5818 
5819 	return (ENXIO);
5820 }
5821 
5822 static int
5823 iwm_dev_check(device_t dev)
5824 {
5825 	struct iwm_softc *sc;
5826 	uint16_t devid;
5827 	int i;
5828 
5829 	sc = device_get_softc(dev);
5830 
5831 	devid = pci_get_device(dev);
5832 	for (i = 0; i < nitems(iwm_devices); i++) {
5833 		if (iwm_devices[i].device == devid) {
5834 			sc->cfg = iwm_devices[i].cfg;
5835 			return (0);
5836 		}
5837 	}
5838 	device_printf(dev, "unknown adapter type\n");
5839 	return ENXIO;
5840 }
5841 
5842 /* PCI registers */
5843 #define PCI_CFG_RETRY_TIMEOUT	0x041
5844 
5845 static int
5846 iwm_pci_attach(device_t dev)
5847 {
5848 	struct iwm_softc *sc;
5849 	int count, error, rid;
5850 	uint16_t reg;
5851 
5852 	sc = device_get_softc(dev);
5853 
	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
5856 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5857 
5858 	/* Enable bus-mastering and hardware bug workaround. */
5859 	pci_enable_busmaster(dev);
5860 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* Clear the INTx status bit if set; it is only meaningful without MSI. */
5862 	if (reg & PCIM_STATUS_INTxSTATE) {
5863 		reg &= ~PCIM_STATUS_INTxSTATE;
5864 	}
5865 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5866 
5867 	rid = PCIR_BAR(0);
5868 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5869 	    RF_ACTIVE);
5870 	if (sc->sc_mem == NULL) {
5871 		device_printf(sc->sc_dev, "can't map mem space\n");
5872 		return (ENXIO);
5873 	}
5874 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5875 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5876 
5877 	/* Install interrupt handler. */
5878 	count = 1;
5879 	rid = 0;
5880 	if (pci_alloc_msi(dev, &count) == 0)
5881 		rid = 1;
5882 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5883 	    (rid != 0 ? 0 : RF_SHAREABLE));
5884 	if (sc->sc_irq == NULL) {
5885 		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
5887 	}
5888 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5889 	    NULL, iwm_intr, sc, &sc->sc_ih);
5890 	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
5893 	}
5894 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5895 
5896 	return (0);
5897 }
5898 
5899 static void
5900 iwm_pci_detach(device_t dev)
5901 {
5902 	struct iwm_softc *sc = device_get_softc(dev);
5903 
5904 	if (sc->sc_irq != NULL) {
5905 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5906 		bus_release_resource(dev, SYS_RES_IRQ,
5907 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5908 		pci_release_msi(dev);
	}
5910 	if (sc->sc_mem != NULL)
5911 		bus_release_resource(dev, SYS_RES_MEMORY,
5912 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5913 }
5914 
5917 static int
5918 iwm_attach(device_t dev)
5919 {
5920 	struct iwm_softc *sc = device_get_softc(dev);
5921 	struct ieee80211com *ic = &sc->sc_ic;
5922 	int error;
5923 	int txq_i, i;
5924 
5925 	sc->sc_dev = dev;
5926 	sc->sc_attached = 1;
5927 	IWM_LOCK_INIT(sc);
5928 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5929 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5930 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5931 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5932 
5933 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5934 	if (sc->sc_notif_wait == NULL) {
5935 		device_printf(dev, "failed to init notification wait struct\n");
5936 		goto fail;
5937 	}
5938 
5939 	/* Init phy db */
5940 	sc->sc_phy_db = iwm_phy_db_init(sc);
5941 	if (!sc->sc_phy_db) {
5942 		device_printf(dev, "Cannot init phy_db\n");
5943 		goto fail;
5944 	}
5945 
	/* Treat EBS as successful unless the firmware states otherwise. */
5947 	sc->last_ebs_successful = TRUE;
5948 
5949 	/* PCI attach */
5950 	error = iwm_pci_attach(dev);
5951 	if (error != 0)
5952 		goto fail;
5953 
5954 	sc->sc_wantresp = -1;
5955 
5956 	/* Check device type */
5957 	error = iwm_dev_check(dev);
5958 	if (error != 0)
5959 		goto fail;
5960 
5961 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5962 	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5964 	 * changed, and now the revision step also includes bit 0-1 (no more
5965 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5966 	 * in the old format.
5967 	 */
5968 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5969 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5970 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5971 
5972 	if (iwm_prepare_card_hw(sc) != 0) {
5973 		device_printf(dev, "could not initialize hardware\n");
5974 		goto fail;
5975 	}
5976 
5977 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5978 		int ret;
5979 		uint32_t hw_step;
5980 
5981 		/*
		 * To recognize a C-step part, the driver must read the
		 * chip version id located at the AUX bus MISC address.
5984 		 */
5985 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5986 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5987 		DELAY(2);
5988 
5989 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5990 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5991 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5992 				   25000);
5993 		if (!ret) {
5994 			device_printf(sc->sc_dev,
5995 			    "Failed to wake up the nic\n");
5996 			goto fail;
5997 		}
5998 
5999 		if (iwm_nic_lock(sc)) {
6000 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6001 			hw_step |= IWM_ENABLE_WFPM;
6002 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6003 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6004 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6005 			if (hw_step == 0x3)
6006 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6007 						(IWM_SILICON_C_STEP << 2);
6008 			iwm_nic_unlock(sc);
6009 		} else {
6010 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6011 			goto fail;
6012 		}
6013 	}
6014 
	/* Special-case the 7265D, which shares its PCI IDs with the 7265. */
6016 	if (sc->cfg == &iwm7265_cfg &&
6017 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6018 		sc->cfg = &iwm7265d_cfg;
6019 	}
6020 
6021 	/* Allocate DMA memory for firmware transfers. */
6022 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6023 		device_printf(dev, "could not allocate memory for firmware\n");
6024 		goto fail;
6025 	}
6026 
6027 	/* Allocate "Keep Warm" page. */
6028 	if ((error = iwm_alloc_kw(sc)) != 0) {
6029 		device_printf(dev, "could not allocate keep warm page\n");
6030 		goto fail;
6031 	}
6032 
6033 	/* We use ICT interrupts */
6034 	if ((error = iwm_alloc_ict(sc)) != 0) {
6035 		device_printf(dev, "could not allocate ICT table\n");
6036 		goto fail;
6037 	}
6038 
6039 	/* Allocate TX scheduler "rings". */
6040 	if ((error = iwm_alloc_sched(sc)) != 0) {
6041 		device_printf(dev, "could not allocate TX scheduler rings\n");
6042 		goto fail;
6043 	}
6044 
6045 	/* Allocate TX rings */
6046 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6047 		if ((error = iwm_alloc_tx_ring(sc,
6048 		    &sc->txq[txq_i], txq_i)) != 0) {
6049 			device_printf(dev,
6050 			    "could not allocate TX ring %d\n",
6051 			    txq_i);
6052 			goto fail;
6053 		}
6054 	}
6055 
6056 	/* Allocate RX ring. */
6057 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6058 		device_printf(dev, "could not allocate RX ring\n");
6059 		goto fail;
6060 	}
6061 
6062 	/* Clear pending interrupts. */
6063 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6064 
6065 	ic->ic_softc = sc;
6066 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6067 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6068 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6069 
6070 	/* Set device capabilities. */
6071 	ic->ic_caps =
6072 	    IEEE80211_C_STA |
6073 	    IEEE80211_C_WPA |		/* WPA/RSN */
6074 	    IEEE80211_C_WME |
6075 	    IEEE80211_C_PMGT |
6076 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6077 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6078 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6079 	    ;
6080 	/* Advertise full-offload scanning */
6081 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6082 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6083 		sc->sc_phyctxt[i].id = i;
6084 		sc->sc_phyctxt[i].color = 0;
6085 		sc->sc_phyctxt[i].ref = 0;
6086 		sc->sc_phyctxt[i].channel = NULL;
6087 	}
6088 
6089 	/* Default noise floor */
6090 	sc->sc_noise = -96;
6091 
6092 	/* Max RSSI */
6093 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6094 
6095 	sc->sc_preinit_hook.ich_func = iwm_preinit;
6096 	sc->sc_preinit_hook.ich_arg = sc;
6097 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6098 		device_printf(dev, "config_intrhook_establish failed\n");
6099 		goto fail;
6100 	}
6101 
6102 #ifdef IWM_DEBUG
6103 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6104 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6105 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6106 #endif
6107 
6108 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6109 	    "<-%s\n", __func__);
6110 
6111 	return 0;
6112 
6113 	/* Free allocated memory if something failed during attachment. */
6114 fail:
6115 	iwm_detach_local(sc, 0);
6116 
6117 	return ENXIO;
6118 }
6119 
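/*
 * Reject multicast addresses (low bit of the first octet set) and the
 * all-zeroes address.
 */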
6120 static int
6121 iwm_is_valid_ether_addr(uint8_t *addr)
6122 {
6123 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6124 
6125 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6126 		return (FALSE);
6127 
6128 	return (TRUE);
6129 }
6130 
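/*
 * Push net80211's current WME/EDCA parameters into the firmware by
 * refreshing the MAC context, once one has been uploaded.
 */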
6131 static int
6132 iwm_wme_update(struct ieee80211com *ic)
6133 {
6134 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6135 	struct iwm_softc *sc = ic->ic_softc;
6136 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6137 	struct iwm_vap *ivp = IWM_VAP(vap);
6138 	struct iwm_node *in;
6139 	struct wmeParams tmp[WME_NUM_AC];
6140 	int aci, error;
6141 
6142 	if (vap == NULL)
6143 		return (0);
6144 
6145 	IEEE80211_LOCK(ic);
6146 	for (aci = 0; aci < WME_NUM_AC; aci++)
6147 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6148 	IEEE80211_UNLOCK(ic);
6149 
6150 	IWM_LOCK(sc);
6151 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6152 		const struct wmeParams *ac = &tmp[aci];
6153 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6154 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6155 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6156 		ivp->queue_params[aci].edca_txop =
6157 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6158 	}
6159 	ivp->have_wme = TRUE;
6160 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6161 		in = IWM_NODE(vap->iv_bss);
6162 		if (in->in_assoc) {
6163 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6164 				device_printf(sc->sc_dev,
6165 				    "%s: failed to update MAC\n", __func__);
6166 			}
6167 		}
6168 	}
6169 	IWM_UNLOCK(sc);
6170 
6171 	return (0);
6172 #undef IWM_EXP2
6173 }
6174 
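/*
 * Deferred boot-time initialization, run from config_intrhook context:
 * start the hardware once to run the init firmware and read the NVM,
 * then complete the net80211 attachment.
 */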
6175 static void
6176 iwm_preinit(void *arg)
6177 {
6178 	struct iwm_softc *sc = arg;
6179 	device_t dev = sc->sc_dev;
6180 	struct ieee80211com *ic = &sc->sc_ic;
6181 	int error;
6182 
6183 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6184 	    "->%s\n", __func__);
6185 
6186 	IWM_LOCK(sc);
6187 	if ((error = iwm_start_hw(sc)) != 0) {
6188 		device_printf(dev, "could not initialize hardware\n");
6189 		IWM_UNLOCK(sc);
6190 		goto fail;
6191 	}
6192 
6193 	error = iwm_run_init_mvm_ucode(sc, 1);
6194 	iwm_stop_device(sc);
6195 	if (error) {
6196 		IWM_UNLOCK(sc);
6197 		goto fail;
6198 	}
6199 	device_printf(dev,
6200 	    "hw rev 0x%x, fw ver %s, address %s\n",
6201 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6202 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6203 
6204 	/* not all hardware can do 5GHz band */
6205 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6206 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6207 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6208 	IWM_UNLOCK(sc);
6209 
6210 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6211 	    ic->ic_channels);
6212 
6213 	/*
6214 	 * At this point we've committed - if we fail to do setup,
6215 	 * we now also have to tear down the net80211 state.
6216 	 */
6217 	ieee80211_ifattach(ic);
6218 	ic->ic_vap_create = iwm_vap_create;
6219 	ic->ic_vap_delete = iwm_vap_delete;
6220 	ic->ic_raw_xmit = iwm_raw_xmit;
6221 	ic->ic_node_alloc = iwm_node_alloc;
6222 	ic->ic_scan_start = iwm_scan_start;
6223 	ic->ic_scan_end = iwm_scan_end;
6224 	ic->ic_update_mcast = iwm_update_mcast;
6225 	ic->ic_getradiocaps = iwm_init_channel_map;
6226 	ic->ic_set_channel = iwm_set_channel;
6227 	ic->ic_scan_curchan = iwm_scan_curchan;
6228 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6229 	ic->ic_wme.wme_update = iwm_wme_update;
6230 	ic->ic_parent = iwm_parent;
6231 	ic->ic_transmit = iwm_transmit;
6232 	iwm_radiotap_attach(sc);
6233 	if (bootverbose)
6234 		ieee80211_announce(ic);
6235 
6236 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6237 	    "<-%s\n", __func__);
6238 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6239 
6240 	return;
6241 fail:
6242 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6243 	iwm_detach_local(sc, 0);
6244 }
6245 
6246 /*
6247  * Attach the interface to 802.11 radiotap.
6248  */
6249 static void
6250 iwm_radiotap_attach(struct iwm_softc *sc)
6251 {
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
6261 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6262 	    "->%s end\n", __func__);
6263 }
6264 
6265 static struct ieee80211vap *
6266 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6267     enum ieee80211_opmode opmode, int flags,
6268     const uint8_t bssid[IEEE80211_ADDR_LEN],
6269     const uint8_t mac[IEEE80211_ADDR_LEN])
6270 {
6271 	struct iwm_vap *ivp;
6272 	struct ieee80211vap *vap;
6273 
6274 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6275 		return NULL;
6276 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6277 	vap = &ivp->iv_vap;
6278 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6279 	vap->iv_bmissthreshold = 10;            /* override default */
6280 	/* Override with driver methods. */
6281 	ivp->iv_newstate = vap->iv_newstate;
6282 	vap->iv_newstate = iwm_newstate;
6283 
6284 	ivp->id = IWM_DEFAULT_MACID;
6285 	ivp->color = IWM_DEFAULT_COLOR;
6286 
6287 	ivp->have_wme = FALSE;
6288 	ivp->ps_disabled = FALSE;
6289 
6290 	ieee80211_ratectl_init(vap);
6291 	/* Complete setup. */
6292 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6293 	    mac);
6294 	ic->ic_opmode = opmode;
6295 
6296 	return vap;
6297 }
6298 
6299 static void
6300 iwm_vap_delete(struct ieee80211vap *vap)
6301 {
6302 	struct iwm_vap *ivp = IWM_VAP(vap);
6303 
6304 	ieee80211_ratectl_deinit(vap);
6305 	ieee80211_vap_detach(vap);
6306 	free(ivp, M_80211_VAP);
6307 }
6308 
6309 static void
6310 iwm_scan_start(struct ieee80211com *ic)
6311 {
6312 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6313 	struct iwm_softc *sc = ic->ic_softc;
6314 	int error;
6315 
6316 	IWM_LOCK(sc);
6317 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6318 		/* This should not be possible */
6319 		device_printf(sc->sc_dev,
6320 		    "%s: Previous scan not completed yet\n", __func__);
6321 	}
6322 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6323 		error = iwm_mvm_umac_scan(sc);
6324 	else
6325 		error = iwm_mvm_lmac_scan(sc);
6326 	if (error != 0) {
6327 		device_printf(sc->sc_dev, "could not initiate scan\n");
6328 		IWM_UNLOCK(sc);
6329 		ieee80211_cancel_scan(vap);
6330 	} else {
6331 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6332 		iwm_led_blink_start(sc);
6333 		IWM_UNLOCK(sc);
6334 	}
6335 }
6336 
6337 static void
6338 iwm_scan_end(struct ieee80211com *ic)
6339 {
6340 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6341 	struct iwm_softc *sc = ic->ic_softc;
6342 
6343 	IWM_LOCK(sc);
6344 	iwm_led_blink_stop(sc);
6345 	if (vap->iv_state == IEEE80211_S_RUN)
6346 		iwm_mvm_led_enable(sc);
6347 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6348 		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6350 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6351 		 * taskqueue.
6352 		 */
6353 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6354 		iwm_mvm_scan_stop_wait(sc);
6355 	}
6356 	IWM_UNLOCK(sc);
6357 
6358 	/*
	 * Make sure we don't race if sc_es_task is still enqueued here.
	 * This ensures that it won't call ieee80211_scan_done
6361 	 * when we have already started the next scan.
6362 	 */
6363 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6364 }
6365 
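/*
 * The following net80211 hooks are intentionally empty: scanning is
 * fully offloaded to the firmware, so per-channel stepping and dwell
 * handling happen there, and multicast filter updates are not
 * implemented.
 */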
6366 static void
6367 iwm_update_mcast(struct ieee80211com *ic)
6368 {
6369 }
6370 
6371 static void
6372 iwm_set_channel(struct ieee80211com *ic)
6373 {
6374 }
6375 
6376 static void
6377 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6378 {
6379 }
6380 
6381 static void
6382 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6383 {
6385 }
6386 
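/*
 * Stop the device and, if any interface is still running, bring it
 * back up; IWM_FLAG_BUSY serializes this against other init/stop paths.
 */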
6387 void
6388 iwm_init_task(void *arg1)
6389 {
6390 	struct iwm_softc *sc = arg1;
6391 
6392 	IWM_LOCK(sc);
6393 	while (sc->sc_flags & IWM_FLAG_BUSY)
6394 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6395 	sc->sc_flags |= IWM_FLAG_BUSY;
6396 	iwm_stop(sc);
6397 	if (sc->sc_ic.ic_nrunning > 0)
6398 		iwm_init(sc);
6399 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6400 	wakeup(&sc->sc_flags);
6401 	IWM_UNLOCK(sc);
6402 }
6403 
6404 static int
6405 iwm_resume(device_t dev)
6406 {
6407 	struct iwm_softc *sc = device_get_softc(dev);
6408 	int do_reinit = 0;
6409 
6410 	/*
6411 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6412 	 * PCI Tx retries from interfering with C3 CPU state.
6413 	 */
6414 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
	iwm_init_task(sc);
6416 
6417 	IWM_LOCK(sc);
6418 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6419 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6420 		do_reinit = 1;
6421 	}
6422 	IWM_UNLOCK(sc);
6423 
6424 	if (do_reinit)
6425 		ieee80211_resume_all(&sc->sc_ic);
6426 
6427 	return 0;
6428 }
6429 
6430 static int
6431 iwm_suspend(device_t dev)
6432 {
6433 	int do_stop = 0;
6434 	struct iwm_softc *sc = device_get_softc(dev);
6435 
	do_stop = (sc->sc_ic.ic_nrunning > 0);
6437 
6438 	ieee80211_suspend_all(&sc->sc_ic);
6439 
6440 	if (do_stop) {
6441 		IWM_LOCK(sc);
6442 		iwm_stop(sc);
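		/*
		 * Leave a marker so that iwm_resume() knows to trigger
		 * a full reinit via ieee80211_resume_all().
		 */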
6443 		sc->sc_flags |= IWM_FLAG_SCANNING;
6444 		IWM_UNLOCK(sc);
6445 	}
6446 
6447 	return (0);
6448 }
6449 
6450 static int
6451 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6452 {
6453 	struct iwm_fw_info *fw = &sc->sc_fw;
6454 	device_t dev = sc->sc_dev;
6455 	int i;
6456 
6457 	if (!sc->sc_attached)
6458 		return 0;
6459 	sc->sc_attached = 0;
6460 
6461 	if (do_net80211)
6462 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6463 
6464 	callout_drain(&sc->sc_led_blink_to);
6465 	callout_drain(&sc->sc_watchdog_to);
6466 	iwm_stop_device(sc);
6467 	if (do_net80211) {
6468 		ieee80211_ifdetach(&sc->sc_ic);
6469 	}
6470 
6471 	iwm_phy_db_free(sc->sc_phy_db);
6472 	sc->sc_phy_db = NULL;
6473 
6474 	iwm_free_nvm_data(sc->nvm_data);
6475 
6476 	/* Free descriptor rings */
6477 	iwm_free_rx_ring(sc, &sc->rxq);
6478 	for (i = 0; i < nitems(sc->txq); i++)
6479 		iwm_free_tx_ring(sc, &sc->txq[i]);
6480 
6481 	/* Free firmware */
6482 	if (fw->fw_fp != NULL)
6483 		iwm_fw_info_free(fw);
6484 
6485 	/* Free scheduler */
6486 	iwm_dma_contig_free(&sc->sched_dma);
6487 	iwm_dma_contig_free(&sc->ict_dma);
6488 	iwm_dma_contig_free(&sc->kw_dma);
6489 	iwm_dma_contig_free(&sc->fw_dma);
6490 
6491 	iwm_free_fw_paging(sc);
6492 
6493 	/* Finished with the hardware - detach things */
6494 	iwm_pci_detach(dev);
6495 
6496 	if (sc->sc_notif_wait != NULL) {
6497 		iwm_notification_wait_free(sc->sc_notif_wait);
6498 		sc->sc_notif_wait = NULL;
6499 	}
6500 
6501 	mbufq_drain(&sc->sc_snd);
6502 	IWM_LOCK_DESTROY(sc);
6503 
6504 	return (0);
6505 }
6506 
6507 static int
6508 iwm_detach(device_t dev)
6509 {
6510 	struct iwm_softc *sc = device_get_softc(dev);
6511 
6512 	return (iwm_detach_local(sc, 1));
6513 }
6514 
6515 static device_method_t iwm_pci_methods[] = {
6516         /* Device interface */
6517         DEVMETHOD(device_probe,         iwm_probe),
6518         DEVMETHOD(device_attach,        iwm_attach),
6519         DEVMETHOD(device_detach,        iwm_detach),
6520         DEVMETHOD(device_suspend,       iwm_suspend),
6521         DEVMETHOD(device_resume,        iwm_resume),
6522 
6523         DEVMETHOD_END
6524 };
6525 
6526 static driver_t iwm_pci_driver = {
6527         "iwm",
6528         iwm_pci_methods,
6529         sizeof (struct iwm_softc)
6530 };
6531 
6532 static devclass_t iwm_devclass;
6533 
6534 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6535 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6536 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6537 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6538