/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
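/* mtodoff() is mtod() with a byte offset: it returns a pointer of type t,
 * off bytes into the mbuf's data area. */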

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
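/*
 * The rate field below is in units of 500 kb/s (so 2 == 1 Mb/s);
 * plcp is the corresponding PLCP signal value programmed into the hardware.
 */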
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

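/* Timeouts are in kernel ticks; hz ticks correspond to one second. */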
#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

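/* Set hw.iwm.lar.disable=1 (e.g. in loader.conf) to disable LAR
 * (location-aware regulatory) support. */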
static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		/* error was still 0 here; set it so we don't return success. */
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

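	/*
	 * The image is a stream of TLV records: each record carries a type
	 * and length header followed by tlv_len payload bytes, padded out
	 * to a 4-byte boundary.
	 */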
	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for the paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* TX scheduler rings: per-queue byte count tables used by the TX scheduler. */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}
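	/* MQ hardware uses 64-bit free-BD entries; legacy uses 32-bit ones. */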

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate command space for the other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

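/*
 * The ICT (interrupt cause table) is a DMA area into which the device
 * writes interrupt cause entries, letting the interrupt handler avoid
 * slow register reads across PCIe.
 */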
1234 iwm_ict_reset(struct iwm_softc *sc)
1235 {
1236 	iwm_disable_interrupts(sc);
1237 
1238 	/* Reset ICT table. */
1239 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1240 	sc->ict_cur = 0;
1241 
1242 	/* Set physical address of ICT table (4KB aligned). */
1243 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1244 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1245 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1246 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1247 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1248 
1249 	/* Switch to ICT interrupt mode in driver. */
1250 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1251 
1252 	/* Re-enable interrupts. */
1253 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1254 	iwm_enable_interrupts(sc);
1255 }
1256 
1257 /* iwlwifi pcie/trans.c */
1258 
/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stopped/started whilst active (thanks, openbsd port!) we
 * have to track this correctly.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Timeout while stopping DMA channels: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (possibly redundantly) that we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt.
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A: the NIC is stuck in a reset state after early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
	 * to lose ownership and become unable to reclaim it.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

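/*
 * Multi-queue RX (newer, e.g. 9000-series, hardware) uses per-queue
 * "free" and "used" buffer descriptor rings plus a status area; only
 * queue 0 is configured here.
 */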
static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

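/*
 * Activate a TX queue and bind it to a FIFO.  The command queue is
 * configured directly through scheduler registers; other queues are
 * configured via the firmware's IWM_SCD_QUEUE_CFG command.
 */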
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

1687 static int
1688 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1689 {
1690 	int error, chnl;
1691 
1692 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1693 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1694 
1695 	if (!iwm_nic_lock(sc))
1696 		return EBUSY;
1697 
1698 	iwm_ict_reset(sc);
1699 
1700 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1701 	if (scd_base_addr != 0 &&
1702 	    scd_base_addr != sc->scd_base_addr) {
1703 		device_printf(sc->sc_dev,
1704 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1705 		    __func__, scd_base_addr, sc->scd_base_addr);
1706 	}
1707 
1708 	iwm_nic_unlock(sc);
1709 
1710 	/* reset context data, TX status and translation data */
1711 	error = iwm_write_mem(sc,
1712 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1713 	    NULL, clear_dwords);
1714 	if (error)
1715 		return EBUSY;
1716 
1717 	if (!iwm_nic_lock(sc))
1718 		return EBUSY;
1719 
1720 	/* Set physical address of TX scheduler rings (1KB aligned). */
1721 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1722 
1723 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1724 
1725 	iwm_nic_unlock(sc);
1726 
1727 	/* enable command channel */
1728 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1729 	if (error)
1730 		return error;
1731 
1732 	if (!iwm_nic_lock(sc))
1733 		return EBUSY;
1734 
1735 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1736 
1737 	/* Enable DMA channels. */
1738 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1739 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1740 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1741 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1742 	}
1743 
1744 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1745 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1746 
1747 	iwm_nic_unlock(sc);
1748 
1749 	/* Enable L1-Active */
1750 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1751 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1752 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1753 	}
1754 
1755 	return error;
1756 }
1757 
1758 /*
1759  * NVM read access and content parsing.  We do not support
1760  * external NVM or writing NVM.
1761  * iwlwifi/mvm/nvm.c
1762  */
1763 
1764 /* Default NVM chunk size to read */
1765 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
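/*
 * Illustrative example: with this 2 KiB chunk size, a hypothetical
 * 16 KiB EEPROM would be fetched by iwm_nvm_read_section() below in
 * eight consecutive chunks (offsets 0x0000, 0x0800, ..., 0x3800).
 */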
1766 
1767 #define IWM_NVM_WRITE_OPCODE 1
1768 #define IWM_NVM_READ_OPCODE 0
1769 
1770 /* load nvm chunk response */
1771 enum {
1772 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1773 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1774 };
1775 
1776 static int
1777 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1778 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1779 {
1780 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1781 		.offset = htole16(offset),
1782 		.length = htole16(length),
1783 		.type = htole16(section),
1784 		.op_code = IWM_NVM_READ_OPCODE,
1785 	};
1786 	struct iwm_nvm_access_resp *nvm_resp;
1787 	struct iwm_rx_packet *pkt;
1788 	struct iwm_host_cmd cmd = {
1789 		.id = IWM_NVM_ACCESS_CMD,
1790 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1791 		.data = { &nvm_access_cmd, },
1792 	};
1793 	int ret, bytes_read, offset_read;
1794 	uint8_t *resp_data;
1795 
1796 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1797 
1798 	ret = iwm_send_cmd(sc, &cmd);
1799 	if (ret) {
1800 		device_printf(sc->sc_dev,
1801 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1802 		return ret;
1803 	}
1804 
1805 	pkt = cmd.resp_pkt;
1806 
1807 	/* Extract NVM response */
1808 	nvm_resp = (void *)pkt->data;
1809 	ret = le16toh(nvm_resp->status);
1810 	bytes_read = le16toh(nvm_resp->length);
1811 	offset_read = le16toh(nvm_resp->offset);
1812 	resp_data = nvm_resp->data;
1813 	if (ret) {
1814 		if ((offset != 0) &&
1815 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1816 			/*
1817 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1818 			 * read a chunk from an address that is a multiple of
1819 			 * 2K and got an error because that address is empty.
1820 			 * Meaning of (offset != 0): the driver already read
1821 			 * valid data from another chunk, so this case is not
1822 			 * an error.
1823 			 */
1824 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1826 				    offset);
1827 			*len = 0;
1828 			ret = 0;
1829 		} else {
1830 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 				    "NVM access command failed with status %d\n", ret);
1832 			ret = EIO;
1833 		}
1834 		goto exit;
1835 	}
1836 
1837 	if (offset_read != offset) {
1838 		device_printf(sc->sc_dev,
1839 		    "NVM ACCESS response with invalid offset %d\n",
1840 		    offset_read);
1841 		ret = EINVAL;
1842 		goto exit;
1843 	}
1844 
1845 	if (bytes_read > length) {
1846 		device_printf(sc->sc_dev,
1847 		    "NVM ACCESS response with too much data "
1848 		    "(%d bytes requested, %d bytes received)\n",
1849 		    length, bytes_read);
1850 		ret = EINVAL;
1851 		goto exit;
1852 	}
1853 
1854 	/* Copy the chunk we read into the caller's buffer. */
1855 	memcpy(data + offset, resp_data, bytes_read);
1856 	*len = bytes_read;
1857 
1858  exit:
1859 	iwm_free_resp(sc, &cmd);
1860 	return ret;
1861 }
1862 
1863 /*
1864  * Reads an NVM section completely.
1865  * NICs prior to the 7000 family don't have a real NVM, but just read
1866  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1867  * by the uCode, we must manually check in this case that we don't
1868  * overflow and try to read more than the EEPROM size.
1869  * For 7000-family NICs, we supply the maximal size we can read, and
1870  * the uCode fills the response with as much data as it can without
1871  * overflowing, so no check is needed.
1872  */
1873 static int
1874 iwm_nvm_read_section(struct iwm_softc *sc,
1875 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1876 {
1877 	uint16_t seglen, length, offset = 0;
1878 	int ret;
1879 
1880 	/* Set nvm section read length */
1881 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1882 
1883 	seglen = length;
1884 
1885 	/* Read the NVM until exhausted (reading less than requested) */
1886 	while (seglen == length) {
1887 		/* Ensure the read cannot overflow the NVM buffer. */
1888 		if ((size_read + offset + length) >
1889 		    sc->cfg->eeprom_size) {
1890 			device_printf(sc->sc_dev,
1891 			    "EEPROM size is too small for NVM\n");
1892 			return ENOBUFS;
1893 		}
1894 
1895 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1896 		if (ret) {
1897 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1898 				    "Cannot read NVM from section %d offset %d, length %d\n",
1899 				    section, offset, length);
1900 			return ret;
1901 		}
1902 		offset += seglen;
1903 	}
1904 
1905 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1906 		    "NVM section %d read completed\n", section);
1907 	*len = offset;
1908 	return 0;
1909 }
1910 
1911 /*
1912  * BEGIN IWM_NVM_PARSE
1913  */
1914 
1915 /* iwlwifi/iwl-nvm-parse.c */
1916 
1917 /* NVM offsets (in words) definitions */
1918 enum iwm_nvm_offsets {
1919 	/* NVM HW-Section offset (in words) definitions */
1920 	IWM_HW_ADDR = 0x15,
1921 
1922 	/* NVM SW-Section offset (in words) definitions */
1923 	IWM_NVM_SW_SECTION = 0x1C0,
1924 	IWM_NVM_VERSION = 0,
1925 	IWM_RADIO_CFG = 1,
1926 	IWM_SKU = 2,
1927 	IWM_N_HW_ADDRS = 3,
1928 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1929 
1930 	/* NVM calibration section offset (in words) definitions */
1931 	IWM_NVM_CALIB_SECTION = 0x2B8,
1932 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1933 };
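/*
 * Worked arithmetic (illustrative): IWM_NVM_CHANNELS above resolves to
 * word offset 0x20 within the SW section (0x1E0 - 0x1C0), and
 * IWM_XTAL_CALIB to word offset 0x5E within the calibration section
 * (0x316 - 0x2B8).
 */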
1934 
1935 enum iwm_8000_nvm_offsets {
1936 	/* NVM HW-Section offset (in words) definitions */
1937 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1938 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1939 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1940 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1941 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1942 
1943 	/* NVM SW-Section offset (in words) definitions */
1944 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1945 	IWM_NVM_VERSION_8000 = 0,
1946 	IWM_RADIO_CFG_8000 = 0,
1947 	IWM_SKU_8000 = 2,
1948 	IWM_N_HW_ADDRS_8000 = 3,
1949 
1950 	/* NVM REGULATORY -Section offset (in words) definitions */
1951 	IWM_NVM_CHANNELS_8000 = 0,
1952 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1953 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1954 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1955 
1956 	/* NVM calibration section offset (in words) definitions */
1957 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1958 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1959 };
1960 
1961 /* SKU Capabilities (actual values from NVM definition) */
1962 enum nvm_sku_bits {
1963 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1964 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1965 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1966 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1967 };
1968 
1969 /* radio config bits (actual values from NVM definition) */
1970 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1971 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1972 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1973 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1974 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1975 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1976 
1977 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1978 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1979 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1980 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1981 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1982 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
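/*
 * Illustrative decode (not driver code): for a hypothetical family-8000
 * radio_cfg word of 0x12345678, the masks above yield flavor 0x8,
 * dash 0x7, step 0x6, type 0x345, tx_ant 0x2 and rx_ant 0x1.
 */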
1983 
1984 /**
1985  * enum iwm_nvm_channel_flags - channel flags in NVM
1986  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1987  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1988  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1989  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1990  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1991  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1992  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1993  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1994  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1995  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1996  */
1997 enum iwm_nvm_channel_flags {
1998 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1999 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2000 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2001 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2002 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2003 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2004 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2005 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2006 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2007 };
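/*
 * Example (illustrative): a ch_flags word of 0x020b decodes to
 * VALID | IBSS | ACTIVE | 40MHZ under the definitions above.
 */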
2008 
2009 /*
2010  * Translate EEPROM flags to net80211.
2011  */
2012 static uint32_t
2013 iwm_eeprom_channel_flags(uint16_t ch_flags)
2014 {
2015 	uint32_t nflags;
2016 
2017 	nflags = 0;
2018 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2019 		nflags |= IEEE80211_CHAN_PASSIVE;
2020 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2021 		nflags |= IEEE80211_CHAN_NOADHOC;
2022 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2023 		nflags |= IEEE80211_CHAN_DFS;
2024 		/* Just in case. */
2025 		nflags |= IEEE80211_CHAN_NOADHOC;
2026 	}
2027 
2028 	return (nflags);
2029 }
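/*
 * A minimal usage sketch (illustrative, not compiled into the driver):
 * shows how the NVM flag bits above translate into net80211 channel flags.
 */
#if 0
static void
iwm_example_channel_flags(void)
{
	/* VALID | IBSS set, ACTIVE clear: passive scanning only. */
	uint32_t nflags = iwm_eeprom_channel_flags(IWM_NVM_CHANNEL_VALID |
	    IWM_NVM_CHANNEL_IBSS);

	KASSERT(nflags == IEEE80211_CHAN_PASSIVE, ("unexpected flags"));
}
#endif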
2030 
2031 static void
2032 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2033     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2034     const uint8_t bands[])
2035 {
2036 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2037 	uint32_t nflags;
2038 	uint16_t ch_flags;
2039 	uint8_t ieee;
2040 	int error;
2041 
2042 	for (; ch_idx < ch_num; ch_idx++) {
2043 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2044 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2045 			ieee = iwm_nvm_channels[ch_idx];
2046 		else
2047 			ieee = iwm_nvm_channels_8000[ch_idx];
2048 
2049 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2050 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2051 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2052 			    ieee, ch_flags,
2053 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2054 			    "5.2" : "2.4");
2055 			continue;
2056 		}
2057 
2058 		nflags = iwm_eeprom_channel_flags(ch_flags);
2059 		error = ieee80211_add_channel(chans, maxchans, nchans,
2060 		    ieee, 0, 0, nflags, bands);
2061 		if (error != 0)
2062 			break;
2063 
2064 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2065 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2066 		    ieee, ch_flags,
2067 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2068 		    "5.2" : "2.4");
2069 	}
2070 }
2071 
2072 static void
2073 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2074     struct ieee80211_channel chans[])
2075 {
2076 	struct iwm_softc *sc = ic->ic_softc;
2077 	struct iwm_nvm_data *data = sc->nvm_data;
2078 	uint8_t bands[IEEE80211_MODE_BYTES];
2079 	size_t ch_num;
2080 
2081 	memset(bands, 0, sizeof(bands));
2082 	/* 1-13: 11b/g channels. */
2083 	setbit(bands, IEEE80211_MODE_11B);
2084 	setbit(bands, IEEE80211_MODE_11G);
2085 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2086 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2087 
2088 	/* 14: 11b channel only. */
2089 	clrbit(bands, IEEE80211_MODE_11G);
2090 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2091 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2092 
2093 	if (data->sku_cap_band_52GHz_enable) {
2094 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2095 			ch_num = nitems(iwm_nvm_channels);
2096 		else
2097 			ch_num = nitems(iwm_nvm_channels_8000);
2098 		memset(bands, 0, sizeof(bands));
2099 		setbit(bands, IEEE80211_MODE_11A);
2100 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2101 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2102 	}
2103 }
2104 
2105 static void
2106 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2107 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2108 {
2109 	const uint8_t *hw_addr;
2110 
2111 	if (mac_override) {
2112 		static const uint8_t reserved_mac[] = {
2113 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2114 		};
2115 
2116 		hw_addr = (const uint8_t *)(mac_override +
2117 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2118 
2119 		/*
2120 		 * Store the MAC address from MAO section.
2121 		 * No byte swapping is required in the MAO section.
2122 		 */
2123 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2124 
2125 		/*
2126 		 * Force the use of the OTP MAC address in case of reserved MAC
2127 		 * address in the NVM, or if address is given but invalid.
2128 		 */
2129 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2130 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2131 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2132 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2133 			return;
2134 
2135 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2136 		    "%s: mac address from nvm override section invalid\n",
2137 		    __func__);
2138 	}
2139 
2140 	if (nvm_hw) {
2141 		/* read the mac address from WFMP registers */
2142 		uint32_t mac_addr0 =
2143 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2144 		uint32_t mac_addr1 =
2145 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2146 
2147 		hw_addr = (const uint8_t *)&mac_addr0;
2148 		data->hw_addr[0] = hw_addr[3];
2149 		data->hw_addr[1] = hw_addr[2];
2150 		data->hw_addr[2] = hw_addr[1];
2151 		data->hw_addr[3] = hw_addr[0];
2152 
2153 		hw_addr = (const uint8_t *)&mac_addr1;
2154 		data->hw_addr[4] = hw_addr[1];
2155 		data->hw_addr[5] = hw_addr[0];
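		/*
		 * Illustrative: if IWM_WFMP_MAC_ADDR_0 reads 0xaabbccdd and
		 * IWM_WFMP_MAC_ADDR_1 reads 0x0000eeff, the byte shuffling
		 * above reassembles the MAC address aa:bb:cc:dd:ee:ff.
		 */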
2156 
2157 		return;
2158 	}
2159 
2160 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2161 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2162 }
2163 
2164 static int
2165 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2166 	    const uint16_t *phy_sku)
2167 {
2168 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2169 		return le16_to_cpup(nvm_sw + IWM_SKU);
2170 
2171 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2172 }
2173 
2174 static int
2175 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2176 {
2177 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2178 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2179 	else
2180 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2181 						IWM_NVM_VERSION_8000));
2182 }
2183 
2184 static int
2185 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2186 		  const uint16_t *phy_sku)
2187 {
2188 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2189 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2190 
2191 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2192 }
2193 
2194 static int
2195 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2196 {
2197 	int n_hw_addr;
2198 
2199 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2200 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2201 
2202 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2203 
2204 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2205 }
2206 
2207 static void
2208 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2209 		  uint32_t radio_cfg)
2210 {
2211 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2212 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2213 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2214 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2215 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2216 		return;
2217 	}
2218 
2219 	/* set the radio configuration for family 8000 */
2220 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2221 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2222 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2223 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2224 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2225 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2226 }
2227 
2228 static int
2229 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2230 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2231 {
2232 #ifdef notyet /* for FAMILY 9000 */
2233 	if (cfg->mac_addr_from_csr) {
2234 		iwm_set_hw_address_from_csr(sc, data);
2235 	} else
2236 #endif
2237 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2238 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2239 
2240 		/* The byte order is little-endian 16-bit, so bytes 123456 read back as 214365. */
2241 		data->hw_addr[0] = hw_addr[1];
2242 		data->hw_addr[1] = hw_addr[0];
2243 		data->hw_addr[2] = hw_addr[3];
2244 		data->hw_addr[3] = hw_addr[2];
2245 		data->hw_addr[4] = hw_addr[5];
2246 		data->hw_addr[5] = hw_addr[4];
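		/*
		 * Illustrative: NVM bytes 12 34 56 78 9a bc at IWM_HW_ADDR
		 * therefore produce the MAC address 34:12:78:56:bc:9a.
		 */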
2247 	} else {
2248 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2249 	}
2250 
2251 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2252 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2253 		return EINVAL;
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 static struct iwm_nvm_data *
2260 iwm_parse_nvm_data(struct iwm_softc *sc,
2261 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2262 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2263 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2264 {
2265 	struct iwm_nvm_data *data;
2266 	uint32_t sku, radio_cfg;
2267 	uint16_t lar_config;
2268 
2269 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2270 		data = malloc(sizeof(*data) +
2271 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2272 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2273 	} else {
2274 		data = malloc(sizeof(*data) +
2275 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2276 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2277 	}
2278 	if (!data)
2279 		return NULL;
2280 
2281 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2282 
2283 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2284 	iwm_set_radio_cfg(sc, data, radio_cfg);
2285 
2286 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2287 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2288 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2289 	data->sku_cap_11n_enable = 0;
2290 
2291 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2292 
2293 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2294 		/* TODO: use IWL_NVM_EXT */
2295 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2296 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2297 				       IWM_NVM_LAR_OFFSET_8000;
2298 
2299 		lar_config = le16_to_cpup(regulatory + lar_offset);
2300 		data->lar_enabled = !!(lar_config &
2301 				       IWM_NVM_LAR_ENABLED_8000);
2302 	}
2303 
2304 	/* If no valid mac address was found - bail out */
2305 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2306 		free(data, M_DEVBUF);
2307 		return NULL;
2308 	}
2309 
2310 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2311 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2312 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2313 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2314 	} else {
2315 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2316 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2317 	}
2318 
2319 	return data;
2320 }
2321 
2322 static void
2323 iwm_free_nvm_data(struct iwm_nvm_data *data)
2324 {
2325 	if (data != NULL)
2326 		free(data, M_DEVBUF);
2327 }
2328 
2329 static struct iwm_nvm_data *
2330 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2331 {
2332 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2333 
2334 	/* Checking for required sections */
2335 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2336 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2337 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2338 			device_printf(sc->sc_dev,
2339 			    "Can't parse empty OTP/NVM sections\n");
2340 			return NULL;
2341 		}
2342 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2343 		/* SW and REGULATORY sections are mandatory */
2344 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2345 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2346 			device_printf(sc->sc_dev,
2347 			    "Can't parse empty OTP/NVM sections\n");
2348 			return NULL;
2349 		}
2350 		/* MAC_OVERRIDE or at least HW section must exist */
2351 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2352 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2353 			device_printf(sc->sc_dev,
2354 			    "Can't parse mac_address, empty sections\n");
2355 			return NULL;
2356 		}
2357 
2358 		/* PHY_SKU section is mandatory in B0 */
2359 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2360 			device_printf(sc->sc_dev,
2361 			    "Can't parse phy_sku in B0, empty sections\n");
2362 			return NULL;
2363 		}
2364 	} else {
2365 		panic("unknown device family %d\n", sc->cfg->device_family);
2366 	}
2367 
2368 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2369 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2370 	calib = (const uint16_t *)
2371 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2372 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2373 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2374 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2375 	mac_override = (const uint16_t *)
2376 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2377 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2378 
2379 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2380 	    phy_sku, regulatory);
2381 }
2382 
2383 static int
2384 iwm_nvm_init(struct iwm_softc *sc)
2385 {
2386 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2387 	int i, ret, section;
2388 	uint32_t size_read = 0;
2389 	uint8_t *nvm_buffer, *temp;
2390 	uint16_t len;
2391 
2392 	memset(nvm_sections, 0, sizeof(nvm_sections));
2393 
2394 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2395 		return EINVAL;
2396 
2397 	/* Load NVM values from the NIC, reading via the firmware. */
2399 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2400 
2401 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2402 	if (!nvm_buffer)
2403 		return ENOMEM;
2404 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2405 		/* we override the constness for initial read */
2406 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2407 					   &len, size_read);
2408 		if (ret)
2409 			continue;
2410 		size_read += len;
2411 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2412 		if (!temp) {
2413 			ret = ENOMEM;
2414 			break;
2415 		}
2416 		memcpy(temp, nvm_buffer, len);
2417 
2418 		nvm_sections[section].data = temp;
2419 		nvm_sections[section].length = len;
2420 	}
2421 	if (!size_read)
2422 		device_printf(sc->sc_dev, "OTP is blank\n");
2423 	free(nvm_buffer, M_DEVBUF);
2424 
2425 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2426 	if (!sc->nvm_data)
2427 		return EINVAL;
2428 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2429 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2430 
2431 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2432 		if (nvm_sections[i].data != NULL)
2433 			free(nvm_sections[i].data, M_DEVBUF);
2434 	}
2435 
2436 	return 0;
2437 }
2438 
2439 static int
2440 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2441 	const struct iwm_fw_desc *section)
2442 {
2443 	struct iwm_dma_info *dma = &sc->fw_dma;
2444 	uint8_t *v_addr;
2445 	bus_addr_t p_addr;
2446 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2447 	int ret = 0;
2448 
2449 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2450 		    "%s: [%d] uCode section being loaded...\n",
2451 		    __func__, section_num);
2452 
2453 	v_addr = dma->vaddr;
2454 	p_addr = dma->paddr;
2455 
2456 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2457 		uint32_t copy_size, dst_addr;
2458 		int extended_addr = FALSE;
2459 
2460 		copy_size = MIN(chunk_sz, section->len - offset);
2461 		dst_addr = section->offset + offset;
2462 
2463 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2464 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2465 			extended_addr = TRUE;
2466 
2467 		if (extended_addr)
2468 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2469 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2470 
2471 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2472 		    copy_size);
2473 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2474 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2475 						   copy_size);
2476 
2477 		if (extended_addr)
2478 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2479 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2480 
2481 		if (ret) {
2482 			device_printf(sc->sc_dev,
2483 			    "%s: Could not load the [%d] uCode section\n",
2484 			    __func__, section_num);
2485 			break;
2486 		}
2487 	}
2488 
2489 	return ret;
2490 }
2491 
2492 /*
2493  * ucode
2494  */
2495 static int
2496 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2497 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2498 {
2499 	sc->sc_fw_chunk_done = 0;
2500 
2501 	if (!iwm_nic_lock(sc))
2502 		return EBUSY;
2503 
2504 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2505 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2506 
2507 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2508 	    dst_addr);
2509 
2510 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2511 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2512 
2513 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2514 	    (iwm_get_dma_hi_addr(phy_addr)
2515 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2516 
2517 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2518 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2519 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2520 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2521 
2522 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2523 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2524 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2525 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2526 
2527 	iwm_nic_unlock(sc);
2528 
2529 	/* wait up to 5s for this segment to load */
2530 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2531 
2532 	if (!sc->sc_fw_chunk_done) {
2533 		device_printf(sc->sc_dev,
2534 		    "fw chunk addr 0x%x len %d failed to load\n",
2535 		    dst_addr, byte_cnt);
2536 		return ETIMEDOUT;
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 static int
2543 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2544 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2545 {
2546 	int shift_param;
2547 	int i, ret = 0, sec_num = 0x1;
2548 	uint32_t val, last_read_idx = 0;
2549 
2550 	if (cpu == 1) {
2551 		shift_param = 0;
2552 		*first_ucode_section = 0;
2553 	} else {
2554 		shift_param = 16;
2555 		(*first_ucode_section)++;
2556 	}
2557 
2558 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2559 		last_read_idx = i;
2560 
2561 		/*
2562 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2563 		 * CPU1 sections from the CPU2 sections.
2564 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2565 		 * CPU2 non-paged sections from the CPU2 paging sections.
2566 		 */
2567 		if (!image->sec[i].data ||
2568 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2569 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2570 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2571 				    "Break since Data not valid or Empty section, sec = %d\n",
2572 				    i);
2573 			break;
2574 		}
2575 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2576 		if (ret)
2577 			return ret;
2578 
2579 		/* Notify the ucode of the loaded section number and status */
2580 		if (iwm_nic_lock(sc)) {
2581 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2582 			val = val | (sec_num << shift_param);
2583 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2584 			sec_num = (sec_num << 1) | 0x1;
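			/*
			 * Illustrative: sec_num walks 0x1, 0x3, 0x7, ..., so
			 * after three CPU1 sections the low half of
			 * IWM_FH_UCODE_LOAD_STATUS reads 0x7.
			 */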
2585 			iwm_nic_unlock(sc);
2586 		}
2587 	}
2588 
2589 	*first_ucode_section = last_read_idx;
2590 
2591 	iwm_enable_interrupts(sc);
2592 
2593 	if (iwm_nic_lock(sc)) {
2594 		if (cpu == 1)
2595 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2596 		else
2597 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2598 		iwm_nic_unlock(sc);
2599 	}
2600 
2601 	return 0;
2602 }
2603 
2604 static int
2605 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2606 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2607 {
2608 	int shift_param;
2609 	int i, ret = 0;
2610 	uint32_t last_read_idx = 0;
2611 
2612 	if (cpu == 1) {
2613 		shift_param = 0;
2614 		*first_ucode_section = 0;
2615 	} else {
2616 		shift_param = 16;
2617 		(*first_ucode_section)++;
2618 	}
2619 
2620 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2621 		last_read_idx = i;
2622 
2623 		/*
2624 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2625 		 * CPU1 sections from the CPU2 sections.
2626 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2627 		 * CPU2 non-paged sections from the CPU2 paging sections.
2628 		 */
2629 		if (!image->sec[i].data ||
2630 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2631 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2632 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2633 				    "Break since Data not valid or Empty section, sec = %d\n",
2634 				     i);
2635 			break;
2636 		}
2637 
2638 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2639 		if (ret)
2640 			return ret;
2641 	}
2642 
2643 	*first_ucode_section = last_read_idx;
2644 
2645 	return 0;
2647 }
2648 
2649 static int
2650 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2651 {
2652 	int ret = 0;
2653 	int first_ucode_section;
2654 
2655 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2656 		     image->is_dual_cpus ? "Dual" : "Single");
2657 
2658 	/* load to FW the binary non secured sections of CPU1 */
2659 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2660 	if (ret)
2661 		return ret;
2662 
2663 	if (image->is_dual_cpus) {
2664 		/* set CPU2 header address */
2665 		if (iwm_nic_lock(sc)) {
2666 			iwm_write_prph(sc,
2667 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2668 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2669 			iwm_nic_unlock(sc);
2670 		}
2671 
2672 		/* load to FW the binary sections of CPU2 */
2673 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2674 						 &first_ucode_section);
2675 		if (ret)
2676 			return ret;
2677 	}
2678 
2679 	iwm_enable_interrupts(sc);
2680 
2681 	/* release CPU reset */
2682 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2683 
2684 	return 0;
2685 }
2686 
2687 int
2688 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2689 	const struct iwm_fw_img *image)
2690 {
2691 	int ret = 0;
2692 	int first_ucode_section;
2693 
2694 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2695 		    image->is_dual_cpus ? "Dual" : "Single");
2696 
2697 	/* configure the ucode to be ready to get the secured image */
2698 	/* release CPU reset */
2699 	if (iwm_nic_lock(sc)) {
2700 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2701 		    IWM_RELEASE_CPU_RESET_BIT);
2702 		iwm_nic_unlock(sc);
2703 	}
2704 
2705 	/* load to FW the binary Secured sections of CPU1 */
2706 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2707 	    &first_ucode_section);
2708 	if (ret)
2709 		return ret;
2710 
2711 	/* load to FW the binary sections of CPU2 */
2712 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2713 	    &first_ucode_section);
2714 }
2715 
2716 /* XXX Get rid of this definition */
2717 static inline void
2718 iwm_enable_fw_load_int(struct iwm_softc *sc)
2719 {
2720 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2721 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2722 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2723 }
2724 
2725 /* XXX Add proper rfkill support code */
2726 static int
2727 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2728 {
2729 	int ret;
2730 
2731 	/* This may fail if AMT took ownership of the device */
2732 	if (iwm_prepare_card_hw(sc)) {
2733 		device_printf(sc->sc_dev,
2734 		    "%s: Exit HW not ready\n", __func__);
2735 		ret = EIO;
2736 		goto out;
2737 	}
2738 
2739 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2740 
2741 	iwm_disable_interrupts(sc);
2742 
2743 	/* make sure rfkill handshake bits are cleared */
2744 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2745 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2746 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2747 
2748 	/* clear (again), then enable host interrupts */
2749 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2750 
2751 	ret = iwm_nic_init(sc);
2752 	if (ret) {
2753 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2754 		goto out;
2755 	}
2756 
2757 	/*
2758 	 * Now, we load the firmware and don't want to be interrupted, even
2759 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2760 	 * FH_TX interrupt which is needed to load the firmware). If the
2761 	 * RF-Kill switch is toggled, we will find out after having loaded
2762 	 * the firmware and return the proper value to the caller.
2763 	 */
2764 	iwm_enable_fw_load_int(sc);
2765 
2766 	/* really make sure rfkill handshake bits are cleared */
2767 	/* maybe we should write a few times more?  just to make sure */
2768 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2769 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2770 
2771 	/* Load the given image to the HW */
2772 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2773 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2774 	else
2775 		ret = iwm_pcie_load_given_ucode(sc, fw);
2776 
2777 	/* XXX re-check RF-Kill state */
2778 
2779 out:
2780 	return ret;
2781 }
2782 
2783 static int
2784 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2785 {
2786 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2787 		.valid = htole32(valid_tx_ant),
2788 	};
2789 
2790 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2791 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2792 }
2793 
2794 /* iwlwifi: mvm/fw.c */
2795 static int
2796 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2797 {
2798 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2799 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2800 
2801 	/* Set parameters */
2802 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2803 	phy_cfg_cmd.calib_control.event_trigger =
2804 	    sc->sc_default_calib[ucode_type].event_trigger;
2805 	phy_cfg_cmd.calib_control.flow_trigger =
2806 	    sc->sc_default_calib[ucode_type].flow_trigger;
2807 
2808 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2809 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2810 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2811 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2812 }
2813 
2814 static int
2815 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2816 {
2817 	struct iwm_alive_data *alive_data = data;
2818 	struct iwm_alive_resp_v3 *palive3;
2819 	struct iwm_alive_resp *palive;
2820 	struct iwm_umac_alive *umac;
2821 	struct iwm_lmac_alive *lmac1;
2822 	struct iwm_lmac_alive *lmac2 = NULL;
2823 	uint16_t status;
2824 
2825 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2826 		palive = (void *)pkt->data;
2827 		umac = &palive->umac_data;
2828 		lmac1 = &palive->lmac_data[0];
2829 		lmac2 = &palive->lmac_data[1];
2830 		status = le16toh(palive->status);
2831 	} else {
2832 		palive3 = (void *)pkt->data;
2833 		umac = &palive3->umac_data;
2834 		lmac1 = &palive3->lmac_data;
2835 		status = le16toh(palive3->status);
2836 	}
2837 
2838 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2839 	if (lmac2)
2840 		sc->error_event_table[1] =
2841 			le32toh(lmac2->error_event_table_ptr);
2842 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2843 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2844 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2845 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2846 	if (sc->umac_error_event_table)
2847 		sc->support_umac_log = TRUE;
2848 
2849 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2850 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2851 		    status, lmac1->ver_type, lmac1->ver_subtype);
2852 
2853 	if (lmac2)
2854 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2855 
2856 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2857 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2858 		    le32toh(umac->umac_major),
2859 		    le32toh(umac->umac_minor));
2860 
2861 	return TRUE;
2862 }
2863 
2864 static int
2865 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2866 	struct iwm_rx_packet *pkt, void *data)
2867 {
2868 	struct iwm_phy_db *phy_db = data;
2869 
2870 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2871 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2872 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2873 			    __func__, pkt->hdr.code);
2874 		}
2875 		return TRUE;
2876 	}
2877 
2878 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2879 		device_printf(sc->sc_dev,
2880 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2881 	}
2882 
2883 	return FALSE;
2884 }
2885 
2886 static int
2887 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2888 	enum iwm_ucode_type ucode_type)
2889 {
2890 	struct iwm_notification_wait alive_wait;
2891 	struct iwm_alive_data alive_data;
2892 	const struct iwm_fw_img *fw;
2893 	enum iwm_ucode_type old_type = sc->cur_ucode;
2894 	int error;
2895 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2896 
2897 	fw = &sc->sc_fw.img[ucode_type];
2898 	sc->cur_ucode = ucode_type;
2899 	sc->ucode_loaded = FALSE;
2900 
2901 	memset(&alive_data, 0, sizeof(alive_data));
2902 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2903 				   alive_cmd, nitems(alive_cmd),
2904 				   iwm_alive_fn, &alive_data);
2905 
2906 	error = iwm_start_fw(sc, fw);
2907 	if (error) {
2908 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2909 		sc->cur_ucode = old_type;
2910 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2911 		return error;
2912 	}
2913 
2914 	/*
2915 	 * Some things may run in the background now, but we
2916 	 * just wait for the ALIVE notification here.
2917 	 */
2918 	IWM_UNLOCK(sc);
2919 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2920 				      IWM_UCODE_ALIVE_TIMEOUT);
2921 	IWM_LOCK(sc);
2922 	if (error) {
2923 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2924 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2925 			if (iwm_nic_lock(sc)) {
2926 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2927 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2928 				iwm_nic_unlock(sc);
2929 			}
2930 			device_printf(sc->sc_dev,
2931 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2932 			    a, b);
2933 		}
2934 		sc->cur_ucode = old_type;
2935 		return error;
2936 	}
2937 
2938 	if (!alive_data.valid) {
2939 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2940 		    __func__);
2941 		sc->cur_ucode = old_type;
2942 		return EIO;
2943 	}
2944 
2945 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2946 
2947 	/*
2948 	 * Configure and operate the fw paging mechanism.
2949 	 * The driver configures the paging flow only once; the CPU2 paging
2950 	 * image is included in the IWM_UCODE_INIT image.
2951 	 */
2952 	if (fw->paging_mem_size) {
2953 		error = iwm_save_fw_paging(sc, fw);
2954 		if (error) {
2955 			device_printf(sc->sc_dev,
2956 			    "%s: failed to save the FW paging image\n",
2957 			    __func__);
2958 			return error;
2959 		}
2960 
2961 		error = iwm_send_paging_cmd(sc, fw);
2962 		if (error) {
2963 			device_printf(sc->sc_dev,
2964 			    "%s: failed to send the paging cmd\n", __func__);
2965 			iwm_free_fw_paging(sc);
2966 			return error;
2967 		}
2968 	}
2969 
2970 	if (!error)
2971 		sc->ucode_loaded = TRUE;
2972 	return error;
2973 }
2974 
2975 /*
2976  * mvm misc bits
2977  */
2978 
2979 /*
2980  * follows iwlwifi/fw.c
2981  */
2982 static int
2983 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2984 {
2985 	struct iwm_notification_wait calib_wait;
2986 	static const uint16_t init_complete[] = {
2987 		IWM_INIT_COMPLETE_NOTIF,
2988 		IWM_CALIB_RES_NOTIF_PHY_DB
2989 	};
2990 	int ret;
2991 
2992 	/* do not operate with rfkill switch turned on */
2993 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2994 		device_printf(sc->sc_dev,
2995 		    "radio is disabled by hardware switch\n");
2996 		return EPERM;
2997 	}
2998 
2999 	iwm_init_notification_wait(sc->sc_notif_wait,
3000 				   &calib_wait,
3001 				   init_complete,
3002 				   nitems(init_complete),
3003 				   iwm_wait_phy_db_entry,
3004 				   sc->sc_phy_db);
3005 
3006 	/* Will also start the device */
3007 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3008 	if (ret) {
3009 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3010 		    ret);
3011 		goto error;
3012 	}
3013 
3014 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3015 		ret = iwm_send_bt_init_conf(sc);
3016 		if (ret) {
3017 			device_printf(sc->sc_dev,
3018 			    "failed to send bt coex configuration: %d\n", ret);
3019 			goto error;
3020 		}
3021 	}
3022 
3023 	if (justnvm) {
3024 		/* Read nvm */
3025 		ret = iwm_nvm_init(sc);
3026 		if (ret) {
3027 			device_printf(sc->sc_dev, "failed to read nvm\n");
3028 			goto error;
3029 		}
3030 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3031 		goto error;
3032 	}
3033 
3034 	/* Send TX valid antennas before triggering calibrations */
3035 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3036 	if (ret) {
3037 		device_printf(sc->sc_dev,
3038 		    "failed to send antennas before calibration: %d\n", ret);
3039 		goto error;
3040 	}
3041 
3042 	/*
3043 	 * Send phy configurations command to init uCode
3044 	 * to start the 16.0 uCode init image internal calibrations.
3045 	 */
3046 	ret = iwm_send_phy_cfg_cmd(sc);
3047 	if (ret) {
3048 		device_printf(sc->sc_dev,
3049 		    "%s: Failed to run INIT calibrations: %d\n",
3050 		    __func__, ret);
3051 		goto error;
3052 	}
3053 
3054 	/*
3055 	 * Nothing to do but wait for the init complete notification
3056 	 * from the firmware.
3057 	 */
3058 	IWM_UNLOCK(sc);
3059 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3060 	    IWM_UCODE_CALIB_TIMEOUT);
3061 	IWM_LOCK(sc);
3062 
3064 	goto out;
3065 
3066 error:
3067 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3068 out:
3069 	return ret;
3070 }
3071 
3072 static int
3073 iwm_config_ltr(struct iwm_softc *sc)
3074 {
3075 	struct iwm_ltr_config_cmd cmd = {
3076 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3077 	};
3078 
3079 	if (!sc->sc_ltr_enabled)
3080 		return 0;
3081 
3082 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3083 }
3084 
3085 /*
3086  * receive side
3087  */
3088 
3089 /* (re)stock rx ring, called at init-time and at runtime */
3090 static int
3091 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3092 {
3093 	struct iwm_rx_ring *ring = &sc->rxq;
3094 	struct iwm_rx_data *data = &ring->data[idx];
3095 	struct mbuf *m;
3096 	bus_dmamap_t dmamap;
3097 	bus_dma_segment_t seg;
3098 	int nsegs, error;
3099 
3100 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3101 	if (m == NULL)
3102 		return ENOBUFS;
3103 
3104 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3105 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3106 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3107 	if (error != 0) {
3108 		device_printf(sc->sc_dev,
3109 		    "%s: can't map mbuf, error %d\n", __func__, error);
3110 		m_freem(m);
3111 		return error;
3112 	}
3113 
3114 	if (data->m != NULL)
3115 		bus_dmamap_unload(ring->data_dmat, data->map);
3116 
3117 	/* Swap ring->spare_map with data->map */
3118 	dmamap = data->map;
3119 	data->map = ring->spare_map;
3120 	ring->spare_map = dmamap;
3121 
3122 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3123 	data->m = m;
3124 
3125 	/* Update RX descriptor. */
3126 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3127 	if (sc->cfg->mqrx_supported)
3128 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3129 	else
3130 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3131 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3132 	    BUS_DMASYNC_PREWRITE);
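	/*
	 * Illustrative: a 256-byte-aligned paddr of 0x123400 is stored
	 * verbatim in the MQ (64-bit) descriptor format above, but as
	 * 0x1234 in the legacy (32-bit, paddr >> 8) format.
	 */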
3133 
3134 	return 0;
3135 }
3136 
3137 static void
3138 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3139 {
3140 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3141 
3142 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3143 
3144 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3145 }
3146 
3147 /*
3148  * Retrieve the average noise (in dBm) among receivers.
3149  */
3150 static int
3151 iwm_get_noise(struct iwm_softc *sc,
3152     const struct iwm_statistics_rx_non_phy *stats)
3153 {
3154 	int i, total, nbant, noise;
3155 
3156 	total = nbant = noise = 0;
3157 	for (i = 0; i < 3; i++) {
3158 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3159 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3160 		    __func__,
3161 		    i,
3162 		    noise);
3163 
3164 		if (noise) {
3165 			total += noise;
3166 			nbant++;
3167 		}
3168 	}
3169 
3170 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3171 	    __func__, nbant, total);
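	/*
	 * Worked example (illustrative): silence readings of 20, 25 and 0
	 * give nbant = 2 and total = 45, so the disabled formula below
	 * would report 45 / 2 - 107 = -85 dBm.
	 */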
3172 #if 0
3173 	/* There should be at least one antenna but check anyway. */
3174 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3175 #else
3176 	/* For now, just hard-code it to -96 to be safe */
3177 	return (-96);
3178 #endif
3179 }
3180 
3181 static void
3182 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3183 {
3184 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3185 
3186 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3187 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3188 }
3189 
3190 /* iwlwifi: mvm/rx.c */
3191 /*
3192  * iwm_get_signal_strength - use new rx PHY INFO API
3193  * Values are reported by the fw as positive numbers and must be negated
3194  * to obtain their dBm value.  Account for missing antennas by replacing 0
3195  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3196  */
3197 static int
3198 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3199     struct iwm_rx_phy_info *phy_info)
3200 {
3201 	int energy_a, energy_b, energy_c, max_energy;
3202 	uint32_t val;
3203 
3204 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3205 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3206 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3207 	energy_a = energy_a ? -energy_a : -256;
3208 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3209 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3210 	energy_b = energy_b ? -energy_b : -256;
3211 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3212 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3213 	energy_c = energy_c ? -energy_c : -256;
3214 	max_energy = MAX(energy_a, energy_b);
3215 	max_energy = MAX(max_energy, energy_c);
3216 
3217 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218 	    "energy In A %d B %d C %d , and max %d\n",
3219 	    energy_a, energy_b, energy_c, max_energy);
3220 
3221 	return max_energy;
3222 }
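/*
 * Illustrative: raw energies of A=50, B=0, C=0 become -50, -256 and
 * -256 dBm under the rule above, so the strongest (least negative)
 * antenna, -50 dBm, is returned.
 */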
3223 
3224 static int
3225 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3226     struct iwm_rx_mpdu_desc *desc)
3227 {
3228 	int energy_a, energy_b;
3229 
3230 	energy_a = desc->v1.energy_a;
3231 	energy_b = desc->v1.energy_b;
3232 	energy_a = energy_a ? -energy_a : -256;
3233 	energy_b = energy_b ? -energy_b : -256;
3234 	return MAX(energy_a, energy_b);
3235 }
3236 
3237 /*
3238  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3239  *
3240  * Handles the actual data of the Rx packet from the fw
3241  */
3242 static bool
3243 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3244     bool stolen)
3245 {
3246 	struct ieee80211com *ic = &sc->sc_ic;
3247 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3248 	struct ieee80211_frame *wh;
3249 	struct ieee80211_rx_stats rxs;
3250 	struct iwm_rx_phy_info *phy_info;
3251 	struct iwm_rx_mpdu_res_start *rx_res;
3252 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3253 	uint32_t len;
3254 	uint32_t rx_pkt_status;
3255 	int rssi;
3256 
3257 	phy_info = &sc->sc_last_phy_info;
3258 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3259 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3260 	len = le16toh(rx_res->byte_count);
3261 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3262 
3263 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3264 		device_printf(sc->sc_dev,
3265 		    "dsp size out of range [0,20]: %d\n",
3266 		    phy_info->cfg_phy_cnt);
3267 		return false;
3268 	}
3269 
3270 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3271 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3272 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3273 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3274 		return false;
3275 	}
3276 
3277 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3278 
3279 	/* Map it to relative value */
3280 	rssi = rssi - sc->sc_noise;
3281 
3282 	/* replenish ring for the buffer we're going to feed to the sharks */
3283 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3284 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3285 		    __func__);
3286 		return false;
3287 	}
3288 
3289 	m->m_data = pkt->data + sizeof(*rx_res);
3290 	m->m_pkthdr.len = m->m_len = len;
3291 
3292 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3293 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3294 
3295 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3296 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3297 	    __func__,
3298 	    le16toh(phy_info->channel),
3299 	    le16toh(phy_info->phy_flags));
3300 
3301 	/*
3302 	 * Populate an RX state struct with the provided information.
3303 	 */
3304 	bzero(&rxs, sizeof(rxs));
3305 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3306 	rxs.r_flags |= IEEE80211_R_BAND;
3307 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3308 	rxs.c_ieee = le16toh(phy_info->channel);
3309 	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3310 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3311 		rxs.c_band = IEEE80211_CHAN_2GHZ;
3312 	} else {
3313 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3314 		rxs.c_band = IEEE80211_CHAN_5GHZ;
3315 	}
3316 
3317 	/* rssi is in 1/2db units */
3318 	rxs.c_rssi = rssi * 2;
3319 	rxs.c_nf = sc->sc_noise;
3320 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3321 		return false;
3322 
3323 	if (ieee80211_radiotap_active_vap(vap)) {
3324 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3325 
3326 		tap->wr_flags = 0;
3327 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3328 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3329 		tap->wr_chan_freq = htole16(rxs.c_freq);
3330 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3331 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3332 		tap->wr_dbm_antsignal = (int8_t)rssi;
3333 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3334 		tap->wr_tsft = phy_info->system_timestamp;
3335 		switch (phy_info->rate) {
3336 		/* CCK rates. */
3337 		case  10: tap->wr_rate =   2; break;
3338 		case  20: tap->wr_rate =   4; break;
3339 		case  55: tap->wr_rate =  11; break;
3340 		case 110: tap->wr_rate =  22; break;
3341 		/* OFDM rates. */
3342 		case 0xd: tap->wr_rate =  12; break;
3343 		case 0xf: tap->wr_rate =  18; break;
3344 		case 0x5: tap->wr_rate =  24; break;
3345 		case 0x7: tap->wr_rate =  36; break;
3346 		case 0x9: tap->wr_rate =  48; break;
3347 		case 0xb: tap->wr_rate =  72; break;
3348 		case 0x1: tap->wr_rate =  96; break;
3349 		case 0x3: tap->wr_rate = 108; break;
3350 		/* Unknown rate: should not happen. */
3351 		default:  tap->wr_rate =   0;
3352 		}
3353 	}
3354 
3355 	return true;
3356 }
3357 
3358 static bool
3359 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3360     bool stolen)
3361 {
3362 	struct ieee80211com *ic = &sc->sc_ic;
3363 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3364 	struct ieee80211_frame *wh;
3365 	struct ieee80211_rx_stats rxs;
3366 	struct iwm_rx_mpdu_desc *desc;
3367 	struct iwm_rx_packet *pkt;
3368 	int rssi;
3369 	uint32_t hdrlen, len, rate_n_flags;
3370 	uint16_t phy_info;
3371 	uint8_t channel;
3372 
3373 	pkt = mtodo(m, offset);
3374 	desc = (void *)pkt->data;
3375 
3376 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3377 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3378 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3379 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3380 		return false;
3381 	}
3382 
3383 	channel = desc->v1.channel;
3384 	len = le16toh(desc->mpdu_len);
3385 	phy_info = le16toh(desc->phy_info);
3386 	rate_n_flags = desc->v1.rate_n_flags;
3387 
3388 	wh = mtodo(m, sizeof(*desc));
3389 	m->m_data = pkt->data + sizeof(*desc);
3390 	m->m_pkthdr.len = m->m_len = len;
3392 
3393 	/* Account for padding following the frame header. */
3394 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3395 		hdrlen = ieee80211_anyhdrsize(wh);
3396 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3397 		m->m_data = mtodo(m, 2);
3398 		wh = mtod(m, struct ieee80211_frame *);
3399 	}
3400 
3401 	/* Map it to relative value */
3402 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3403 	rssi = rssi - sc->sc_noise;
3404 
3405 	/* replenish ring for the buffer we're going to feed to the sharks */
3406 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3407 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3408 		    __func__);
3409 		return false;
3410 	}
3411 
3412 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3413 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3414 
3415 	/*
3416 	 * Populate an RX state struct with the provided information.
3417 	 */
3418 	bzero(&rxs, sizeof(rxs));
3419 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3420 	rxs.r_flags |= IEEE80211_R_BAND;
3421 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3422 	rxs.c_ieee = channel;
3423 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3424 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3425 	rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3426 
3427 	/* rssi is in 1/2 dB units */
3428 	rxs.c_rssi = rssi * 2;
3429 	rxs.c_nf = sc->sc_noise;
3430 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3431 		return false;
3432 
3433 	if (ieee80211_radiotap_active_vap(vap)) {
3434 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3435 
3436 		tap->wr_flags = 0;
3437 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3438 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3439 		tap->wr_chan_freq = htole16(rxs.c_freq);
3440 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3441 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3442 		tap->wr_dbm_antsignal = (int8_t)rssi;
3443 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3444 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3445 		switch ((rate_n_flags & 0xff)) {
3446 		/* CCK rates. */
3447 		case  10: tap->wr_rate =   2; break;
3448 		case  20: tap->wr_rate =   4; break;
3449 		case  55: tap->wr_rate =  11; break;
3450 		case 110: tap->wr_rate =  22; break;
3451 		/* OFDM rates. */
3452 		case 0xd: tap->wr_rate =  12; break;
3453 		case 0xf: tap->wr_rate =  18; break;
3454 		case 0x5: tap->wr_rate =  24; break;
3455 		case 0x7: tap->wr_rate =  36; break;
3456 		case 0x9: tap->wr_rate =  48; break;
3457 		case 0xb: tap->wr_rate =  72; break;
3458 		case 0x1: tap->wr_rate =  96; break;
3459 		case 0x3: tap->wr_rate = 108; break;
3460 		/* Unknown rate: should not happen. */
3461 		default:  tap->wr_rate =   0;
3462 		}
3463 	}
3464 
3465 	return true;
3466 }
3467 
3468 static bool
3469 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3470     bool stolen)
3471 {
3472 	struct epoch_tracker et;
3473 	struct ieee80211com *ic;
3474 	struct ieee80211_frame *wh;
3475 	struct ieee80211_node *ni;
3476 	bool ret;
3477 
3478 	ic = &sc->sc_ic;
3479 
3480 	ret = sc->cfg->mqrx_supported ?
3481 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3482 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3483 	if (!ret) {
3484 		counter_u64_add(ic->ic_ierrors, 1);
3485 		return (ret);
3486 	}
3487 
3488 	wh = mtod(m, struct ieee80211_frame *);
3489 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3490 
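	/*
	 * Hand the frame to net80211 with the driver lock dropped;
	 * the input path can re-enter the driver (e.g. to transmit
	 * a response) and must run inside the network epoch.
	 */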
3491 	IWM_UNLOCK(sc);
3492 
3493 	NET_EPOCH_ENTER(et);
3494 	if (ni != NULL) {
3495 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3496 		ieee80211_input_mimo(ni, m);
3497 		ieee80211_free_node(ni);
3498 	} else {
3499 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3500 		ieee80211_input_mimo_all(ic, m);
3501 	}
3502 	NET_EPOCH_EXIT(et);
3503 
3504 	IWM_LOCK(sc);
3505 
3506 	return true;
3507 }
3508 
3509 static int
3510 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3511 	struct iwm_node *in)
3512 {
3513 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3514 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3515 	struct ieee80211_node *ni = &in->in_ni;
3516 	struct ieee80211vap *vap = ni->ni_vap;
3517 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3518 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3519 	boolean_t rate_matched;
3520 	uint8_t tx_resp_rate;
3521 
3522 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3523 
3524 	/* Update rate control statistics. */
3525 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3526 	    __func__,
3527 	    (int) le16toh(tx_resp->status.status),
3528 	    (int) le16toh(tx_resp->status.sequence),
3529 	    tx_resp->frame_count,
3530 	    tx_resp->bt_kill_count,
3531 	    tx_resp->failure_rts,
3532 	    tx_resp->failure_frame,
3533 	    le32toh(tx_resp->initial_rate),
3534 	    (int) le16toh(tx_resp->wireless_media_time));
3535 
3536 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3537 
3538 	/* For rate control, ignore frames sent at different initial rate */
3539 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3540 
3541 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3542 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3543 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3544 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3545 	}
3546 
3547 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3548 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3549 	txs->short_retries = tx_resp->failure_rts;
3550 	txs->long_retries = tx_resp->failure_frame;
3551 	if (status != IWM_TX_STATUS_SUCCESS &&
3552 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3553 		switch (status) {
3554 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3555 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3556 			break;
3557 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3558 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3559 			break;
3560 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3561 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3562 			break;
3563 		default:
3564 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3565 			break;
3566 		}
3567 	} else {
3568 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3569 	}
3570 
3571 	if (rate_matched) {
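		/*
		 * Feed the completion status to net80211's rate control;
		 * if it selects a new TX rate, reprogram the firmware's
		 * link quality (LQ) table to match.
		 */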
3572 		ieee80211_ratectl_tx_complete(ni, txs);
3573 
3574 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3575 		new_rate = vap->iv_bss->ni_txrate;
3576 		if (new_rate != 0 && new_rate != cur_rate) {
3577 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3578 			iwm_setrates(sc, in, rix);
3579 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3580 		}
3581 	}
3582 
3583 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3584 }
3585 
3586 static void
3587 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3588 {
3589 	struct iwm_cmd_header *cmd_hdr;
3590 	struct iwm_tx_ring *ring;
3591 	struct iwm_tx_data *txd;
3592 	struct iwm_node *in;
3593 	struct mbuf *m;
3594 	int idx, qid, qmsk, status;
3595 
3596 	cmd_hdr = &pkt->hdr;
3597 	idx = cmd_hdr->idx;
3598 	qid = cmd_hdr->qid;
3599 
3600 	ring = &sc->txq[qid];
3601 	txd = &ring->data[idx];
3602 	in = txd->in;
3603 	m = txd->m;
3604 
3605 	KASSERT(txd->done == 0, ("txd not done"));
3606 	KASSERT(txd->in != NULL, ("txd without node"));
3607 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3608 
3609 	sc->sc_tx_timer = 0;
3610 
3611 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3612 
3613 	/* Unmap and free mbuf. */
3614 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3615 	bus_dmamap_unload(ring->data_dmat, txd->map);
3616 
3617 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3618 	    "free txd %p, in %p\n", txd, txd->in);
3619 	txd->done = 1;
3620 	txd->m = NULL;
3621 	txd->in = NULL;
3622 
3623 	ieee80211_tx_complete(&in->in_ni, m, status);
3624 
3625 	qmsk = 1 << qid;
3626 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3627 		sc->qfullmsk &= ~qmsk;
3628 		if (sc->qfullmsk == 0)
3629 			iwm_start(sc);
3630 	}
3631 }
3632 
3633 /*
3634  * transmit side
3635  */
3636 
3637 /*
3638  * Process a "command done" firmware notification.  This is where we wakeup
3639  * processes waiting for a synchronous command completion.
3640  * from if_iwn
3641  */
3642 static void
3643 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3644 {
3645 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3646 	struct iwm_tx_data *data;
3647 
3648 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3649 		return;	/* Not a command ack. */
3650 	}
3651 
3652 	/* XXX wide commands? */
3653 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3654 	    "cmd notification type 0x%x qid %d idx %d\n",
3655 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3656 
3657 	data = &ring->data[pkt->hdr.idx];
3658 
3659 	/* If the command was mapped in an mbuf, free it. */
3660 	if (data->m != NULL) {
3661 		bus_dmamap_sync(ring->data_dmat, data->map,
3662 		    BUS_DMASYNC_POSTWRITE);
3663 		bus_dmamap_unload(ring->data_dmat, data->map);
3664 		m_freem(data->m);
3665 		data->m = NULL;
3666 	}
3667 	wakeup(&ring->desc[pkt->hdr.idx]);
3668 
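	/*
	 * Consistency check: the completed index plus the commands
	 * still queued should line up with the ring's write pointer;
	 * a mismatch means some command completions were missed.
	 */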
3669 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3670 		device_printf(sc->sc_dev,
3671 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3672 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3673 		/* XXX call iwm_force_nmi() */
3674 	}
3675 
3676 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3677 	ring->queued--;
3678 	if (ring->queued == 0)
3679 		iwm_pcie_clear_cmd_in_flight(sc);
3680 }
3681 
3682 #if 0
3683 /*
3684  * necessary only for block ack mode
3685  */
3686 void
3687 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3688 	uint16_t len)
3689 {
3690 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3691 	uint16_t w_val;
3692 
3693 	scd_bc_tbl = sc->sched_dma.vaddr;
3694 
3695 	len += 8; /* magic numbers came naturally from paris */
3696 	len = roundup(len, 4) / 4;
3697 
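	/*
	 * Byte-count table entries pack the station id into the top
	 * 4 bits and the frame length, in dwords, into the low 12
	 * bits; e.g. sta_id 1 and 12 dwords gives (1 << 12) | 12 == 0x100c.
	 */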
3698 	w_val = htole16(sta_id << 12 | len);
3699 
3700 	/* Update TX scheduler. */
3701 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3702 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3703 	    BUS_DMASYNC_PREWRITE);
3704 
3705 	/* The first entries are duplicated past the ring end, apparently so the HW can read across the queue wrap. */
3706 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3707 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3708 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3709 		    BUS_DMASYNC_PREWRITE);
3710 	}
3711 }
3712 #endif
3713 
3714 static int
3715 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3716 {
3717 	int i;
3718 
3719 	for (i = 0; i < nitems(iwm_rates); i++) {
3720 		if (iwm_rates[i].rate == rate)
3721 			return (i);
3722 	}
3723 	/* XXX error? */
3724 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3725 	    "%s: couldn't find an entry for rate=%d\n",
3726 	    __func__,
3727 	    rate);
3728 	return (0);
3729 }
3730 
3731 /*
3732  * Fill in the rate related information for a transmit command.
3733  */
3734 static const struct iwm_rate *
3735 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3736 	struct mbuf *m, struct iwm_tx_cmd *tx)
3737 {
3738 	struct ieee80211_node *ni = &in->in_ni;
3739 	struct ieee80211_frame *wh;
3740 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3741 	const struct iwm_rate *rinfo;
3742 	int type;
3743 	int ridx, rate_flags;
3744 
3745 	wh = mtod(m, struct ieee80211_frame *);
3746 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3747 
3748 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3749 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3750 
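	/*
	 * Rate selection: management, control and EAPOL frames use the
	 * configured management rate; multicast frames the multicast
	 * rate; a user-fixed unicast rate wins next; all other data
	 * frames follow the firmware rate-scaling table programmed by
	 * iwm_setrates().
	 */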
3751 	if (type == IEEE80211_FC0_TYPE_MGT ||
3752 	    type == IEEE80211_FC0_TYPE_CTL ||
3753 	    (m->m_flags & M_EAPOL) != 0) {
3754 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3755 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3756 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3757 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3758 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3759 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3760 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3761 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3762 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3763 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3764 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3765 	} else {
3766 		/* for data frames, use RS table */
3767 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3768 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3769 		if (ridx == -1)
3770 			ridx = 0;
3771 
3772 		/* This is the index into the programmed table */
3773 		tx->initial_rate_index = 0;
3774 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3775 	}
3776 
3777 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3778 	    "%s: frame type=%d txrate %d\n",
3779 	        __func__, type, iwm_rates[ridx].rate);
3780 
3781 	rinfo = &iwm_rates[ridx];
3782 
3783 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3784 	    __func__, ridx,
3785 	    rinfo->rate,
3786 	    !! (IWM_RIDX_IS_CCK(ridx))
3787 	    );
3788 
3789 	/* XXX TODO: hard-coded TX antenna? */
3790 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3791 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3792 	else
3793 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3794 	if (IWM_RIDX_IS_CCK(ridx))
3795 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3796 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3797 
3798 	return rinfo;
3799 }
3800 
3801 #define TB0_SIZE 16
3802 static int
3803 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3804 {
3805 	struct ieee80211com *ic = &sc->sc_ic;
3806 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3807 	struct iwm_node *in = IWM_NODE(ni);
3808 	struct iwm_tx_ring *ring;
3809 	struct iwm_tx_data *data;
3810 	struct iwm_tfd *desc;
3811 	struct iwm_device_cmd *cmd;
3812 	struct iwm_tx_cmd *tx;
3813 	struct ieee80211_frame *wh;
3814 	struct ieee80211_key *k = NULL;
3815 	struct mbuf *m1;
3816 	const struct iwm_rate *rinfo;
3817 	uint32_t flags;
3818 	u_int hdrlen;
3819 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3820 	int nsegs;
3821 	uint8_t tid, type;
3822 	int i, totlen, error, pad;
3823 
3824 	wh = mtod(m, struct ieee80211_frame *);
3825 	hdrlen = ieee80211_anyhdrsize(wh);
3826 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3827 	tid = 0;
3828 	ring = &sc->txq[ac];
3829 	desc = &ring->desc[ring->cur];
3830 	data = &ring->data[ring->cur];
3831 
3832 	/* Fill out iwm_tx_cmd to send to the firmware */
3833 	cmd = &ring->cmd[ring->cur];
3834 	cmd->hdr.code = IWM_TX_CMD;
3835 	cmd->hdr.flags = 0;
3836 	cmd->hdr.qid = ring->qid;
3837 	cmd->hdr.idx = ring->cur;
3838 
3839 	tx = (void *)cmd->data;
3840 	memset(tx, 0, sizeof(*tx));
3841 
3842 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3843 
3844 	/* Encrypt the frame if need be. */
3845 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3846 		/* Retrieve key for TX && do software encryption. */
3847 		k = ieee80211_crypto_encap(ni, m);
3848 		if (k == NULL) {
3849 			m_freem(m);
3850 			return (ENOBUFS);
3851 		}
3852 		/* 802.11 header may have moved. */
3853 		wh = mtod(m, struct ieee80211_frame *);
3854 	}
3855 
3856 	if (ieee80211_radiotap_active_vap(vap)) {
3857 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3858 
3859 		tap->wt_flags = 0;
3860 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3861 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3862 		tap->wt_rate = rinfo->rate;
3863 		if (k != NULL)
3864 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3865 		ieee80211_radiotap_tx(vap, m);
3866 	}
3867 
3868 	flags = 0;
3869 	totlen = m->m_pkthdr.len;
3870 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3871 		flags |= IWM_TX_CMD_FLG_ACK;
3872 	}
3873 
3874 	if (type == IEEE80211_FC0_TYPE_DATA &&
3875 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3876 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3877 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3878 	}
3879 
3880 	tx->sta_id = IWM_STATION_ID;
3881 
3882 	if (type == IEEE80211_FC0_TYPE_MGT) {
3883 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3884 
3885 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3886 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3887 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3888 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3889 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3890 		} else {
3891 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3892 		}
3893 	} else {
3894 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3895 	}
3896 
3897 	if (hdrlen & 3) {
3898 		/* First segment length must be a multiple of 4. */
3899 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3900 		tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
3901 		pad = 4 - (hdrlen & 3);
3902 	} else {
3903 		tx->offload_assist = 0;
3904 		pad = 0;
3905 	}
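	/*
	 * (E.g. a 26-byte QoS data header yields hdrlen & 3 == 2 and
	 * thus 2 bytes of pad.)
	 */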
3906 
3907 	tx->len = htole16(totlen);
3908 	tx->tid_tspec = tid;
3909 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3910 
3911 	/* Set physical address of "scratch area". */
3912 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3913 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3914 
3915 	/* Copy 802.11 header in TX command. */
3916 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3917 
3918 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3919 
3920 	tx->sec_ctl = 0;
3921 	tx->tx_flags |= htole32(flags);
3922 
3923 	/* Trim 802.11 header. */
3924 	m_adj(m, hdrlen);
3925 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3926 	    segs, &nsegs, BUS_DMA_NOWAIT);
3927 	if (error != 0) {
3928 		if (error != EFBIG) {
3929 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3930 			    error);
3931 			m_freem(m);
3932 			return error;
3933 		}
3934 		/* Too many DMA segments, linearize mbuf. */
3935 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3936 		if (m1 == NULL) {
3937 			device_printf(sc->sc_dev,
3938 			    "%s: could not defrag mbuf\n", __func__);
3939 			m_freem(m);
3940 			return (ENOBUFS);
3941 		}
3942 		m = m1;
3943 
3944 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3945 		    segs, &nsegs, BUS_DMA_NOWAIT);
3946 		if (error != 0) {
3947 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3948 			    error);
3949 			m_freem(m);
3950 			return error;
3951 		}
3952 	}
3953 	data->m = m;
3954 	data->in = in;
3955 	data->done = 0;
3956 
3957 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3958 	    "sending txd %p, in %p\n", data, data->in);
3959 	KASSERT(data->in != NULL, ("node is NULL"));
3960 
3961 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3962 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3963 	    ring->qid, ring->cur, totlen, nsegs,
3964 	    le32toh(tx->tx_flags),
3965 	    le32toh(tx->rate_n_flags),
3966 	    tx->initial_rate_index
3967 	    );
3968 
3969 	/* Fill TX descriptor. */
3970 	memset(desc, 0, sizeof(*desc));
3971 	desc->num_tbs = 2 + nsegs;
3972 
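	/*
	 * TB0 carries the first TB0_SIZE bytes of the TX command; TB1
	 * the remainder of the command plus the (possibly padded)
	 * 802.11 header; the payload DMA segments follow from TB2 on.
	 */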
3973 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3974 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3975 	    (TB0_SIZE << 4));
3976 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3977 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3978 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3979 	    hdrlen + pad - TB0_SIZE) << 4));
3980 
3981 	/* Other DMA segments are for data payload. */
3982 	for (i = 0; i < nsegs; i++) {
3983 		seg = &segs[i];
3984 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3985 		desc->tbs[i + 2].hi_n_len =
3986 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
3987 		    (seg->ds_len << 4));
3988 	}
3989 
3990 	bus_dmamap_sync(ring->data_dmat, data->map,
3991 	    BUS_DMASYNC_PREWRITE);
3992 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3993 	    BUS_DMASYNC_PREWRITE);
3994 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3995 	    BUS_DMASYNC_PREWRITE);
3996 
3997 #if 0
3998 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3999 #endif
4000 
4001 	/* Kick TX ring. */
4002 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4003 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4004 
4005 	/* Mark TX ring as full if we reach a certain threshold. */
4006 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4007 		sc->qfullmsk |= 1 << ring->qid;
4008 	}
4009 
4010 	return 0;
4011 }
4012 
4013 static int
4014 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4015     const struct ieee80211_bpf_params *params)
4016 {
4017 	struct ieee80211com *ic = ni->ni_ic;
4018 	struct iwm_softc *sc = ic->ic_softc;
4019 	int error = 0;
4020 
4021 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4022 	    "->%s begin\n", __func__);
4023 
4024 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4025 		m_freem(m);
4026 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4027 		    "<-%s not RUNNING\n", __func__);
4028 		return (ENETDOWN);
4029 	}
4030 
4031 	IWM_LOCK(sc);
4032 	/* XXX raw xmit parameters (rate, ACK policy) are ignored for now. */
4033 	error = iwm_tx(sc, m, ni, 0);
4038 	if (sc->sc_tx_timer == 0)
4039 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4040 	sc->sc_tx_timer = 5;
4041 	IWM_UNLOCK(sc);
4042 
4043 	return (error);
4044 }
4045 
4046 /*
4047  * mvm/tx.c
4048  */
4049 
4050 /*
4051  * Note that there are transports that buffer frames before they reach
4052  * the firmware. This means that after flush_tx_path is called, the
4053  * queue might not be empty. The race-free way to handle this is to:
4054  * 1) set the station as draining
4055  * 2) flush the Tx path
4056  * 3) wait for the transport queues to be empty
4057  */
4058 int
4059 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4060 {
4061 	int ret;
4062 	struct iwm_tx_path_flush_cmd flush_cmd = {
4063 		.queues_ctl = htole32(tfd_msk),
4064 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4065 	};
4066 
4067 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4068 	    sizeof(flush_cmd), &flush_cmd);
4069 	if (ret)
4070 		device_printf(sc->sc_dev,
4071 		    "Flushing tx queue failed: %d\n", ret);
4072 	return ret;
4073 }
4074 
4075 /*
4076  * BEGIN mvm/quota.c
4077  */
4078 
4079 static int
4080 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4081 {
4082 	struct iwm_time_quota_cmd cmd;
4083 	int i, idx, ret, num_active_macs, quota, quota_rem;
4084 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4085 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4086 	uint16_t id;
4087 
4088 	memset(&cmd, 0, sizeof(cmd));
4089 
4090 	/* currently, PHY ID == binding ID */
4091 	if (ivp) {
4092 		id = ivp->phy_ctxt->id;
4093 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4094 		colors[id] = ivp->phy_ctxt->color;
4095 
4096 		n_ifs[id] = 1;
4098 	}
4099 
4100 	/*
4101 	 * The FW's scheduling session consists of
4102 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4103 	 * equally between all the bindings that require quota
4104 	 */
4105 	num_active_macs = 0;
4106 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4107 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4108 		num_active_macs += n_ifs[i];
4109 	}
4110 
4111 	quota = 0;
4112 	quota_rem = 0;
4113 	if (num_active_macs) {
4114 		quota = IWM_MAX_QUOTA / num_active_macs;
4115 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4116 	}
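	/*
	 * E.g. with two active MACs the session is split as
	 * IWM_MAX_QUOTA / 2 fragments per binding; any remainder is
	 * folded into the first binding further below.
	 */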
4117 
4118 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4119 		if (colors[i] < 0)
4120 			continue;
4121 
4122 		cmd.quotas[idx].id_and_color =
4123 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4124 
4125 		if (n_ifs[i] <= 0) {
4126 			cmd.quotas[idx].quota = htole32(0);
4127 			cmd.quotas[idx].max_duration = htole32(0);
4128 		} else {
4129 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4130 			cmd.quotas[idx].max_duration = htole32(0);
4131 		}
4132 		idx++;
4133 	}
4134 
4135 	/* Give the remainder of the session to the first binding */
4136 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4137 
4138 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4139 	    sizeof(cmd), &cmd);
4140 	if (ret)
4141 		device_printf(sc->sc_dev,
4142 		    "%s: Failed to send quota: %d\n", __func__, ret);
4143 	return ret;
4144 }
4145 
4146 /*
4147  * END mvm/quota.c
4148  */
4149 
4150 /*
4151  * ieee80211 routines
4152  */
4153 
4154 /*
4155  * Change to AUTH state in 80211 state machine.  Roughly matches what
4156  * Linux does in bss_info_changed().
4157  */
4158 static int
4159 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4160 {
4161 	struct ieee80211_node *ni;
4162 	struct iwm_node *in;
4163 	struct iwm_vap *iv = IWM_VAP(vap);
4164 	uint32_t duration;
4165 	int error;
4166 
4167 	/*
4168 	 * XXX i have a feeling that the vap node is being
4169 	 * freed from underneath us. Grr.
4170 	 */
4171 	ni = ieee80211_ref_node(vap->iv_bss);
4172 	in = IWM_NODE(ni);
4173 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4174 	    "%s: called; vap=%p, bss ni=%p\n",
4175 	    __func__,
4176 	    vap,
4177 	    ni);
4178 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4179 	    __func__, ether_sprintf(ni->ni_bssid));
4180 
4181 	in->in_assoc = 0;
4182 	iv->iv_auth = 1;
4183 
4184 	/*
4185 	 * Firmware bug - it'll crash if the beacon interval is less
4186 	 * than 16. We can't avoid connecting at all, so refuse the
4187 	 * station state change, this will cause net80211 to abandon
4188 	 * attempts to connect to this AP, and eventually wpa_s will
4189 	 * blacklist the AP...
4190 	 */
4191 	if (ni->ni_intval < 16) {
4192 		device_printf(sc->sc_dev,
4193 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4194 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4195 		error = EINVAL;
4196 		goto out;
4197 	}
4198 
4199 	error = iwm_allow_mcast(vap, sc);
4200 	if (error) {
4201 		device_printf(sc->sc_dev,
4202 		    "%s: failed to set multicast\n", __func__);
4203 		goto out;
4204 	}
4205 
4206 	/*
4207 	 * This is where it deviates from what Linux does.
4208 	 *
4209 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4210 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4211 	 * and always does a mac_ctx_changed().
4212 	 *
4213 	 * The openbsd port doesn't attempt to do that - it reset things
4214 	 * at odd states and does the add here.
4215 	 *
4216 	 * So, until the state handling is fixed (ie, we never reset
4217 	 * the NIC except for a firmware failure, which should drag
4218 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4219 	 * contexts that are required), let's do a dirty hack here.
4220 	 */
4221 	if (iv->is_uploaded) {
4222 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4223 			device_printf(sc->sc_dev,
4224 			    "%s: failed to update MAC\n", __func__);
4225 			goto out;
4226 		}
4227 	} else {
4228 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4229 			device_printf(sc->sc_dev,
4230 			    "%s: failed to add MAC\n", __func__);
4231 			goto out;
4232 		}
4233 	}
4234 	sc->sc_firmware_state = 1;
4235 
4236 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4237 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4238 		device_printf(sc->sc_dev,
4239 		    "%s: failed update phy ctxt\n", __func__);
4240 		goto out;
4241 	}
4242 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4243 
4244 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4245 		device_printf(sc->sc_dev,
4246 		    "%s: binding update cmd\n", __func__);
4247 		goto out;
4248 	}
4249 	sc->sc_firmware_state = 2;
4250 	/*
4251 	 * Authentication becomes unreliable when powersaving is left enabled
4252 	 * here. Powersaving will be activated again when association has
4253 	 * finished or is aborted.
4254 	 */
4255 	iv->ps_disabled = TRUE;
4256 	error = iwm_power_update_mac(sc);
4257 	iv->ps_disabled = FALSE;
4258 	if (error != 0) {
4259 		device_printf(sc->sc_dev,
4260 		    "%s: failed to update power management\n",
4261 		    __func__);
4262 		goto out;
4263 	}
4264 	if ((error = iwm_add_sta(sc, in)) != 0) {
4265 		device_printf(sc->sc_dev,
4266 		    "%s: failed to add sta\n", __func__);
4267 		goto out;
4268 	}
4269 	sc->sc_firmware_state = 3;
4270 
4271 	/*
4272 	 * Prevent the FW from wandering off channel during association
4273 	 * by "protecting" the session with a time event.
4274 	 */
4275 	/* XXX duration is in units of TU, not MS */
4276 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4277 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4278 
4279 	error = 0;
4280 out:
4281 	if (error != 0)
4282 		iv->iv_auth = 0;
4283 	ieee80211_free_node(ni);
4284 	return (error);
4285 }
4286 
4287 static struct ieee80211_node *
4288 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4289 {
4290 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4291 	    M_NOWAIT | M_ZERO);
4292 }
4293 
4294 static uint8_t
4295 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4296 {
4297 	uint8_t plcp = rate_n_flags & 0xff;
4298 	int i;
4299 
4300 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4301 		if (iwm_rates[i].plcp == plcp)
4302 			return iwm_rates[i].rate;
4303 	}
4304 	return 0;
4305 }
4306 
4307 uint8_t
4308 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4309 {
4310 	int i;
4311 	uint8_t rval;
4312 
4313 	for (i = 0; i < rs->rs_nrates; i++) {
4314 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4315 		if (rval == iwm_rates[ridx].rate)
4316 			return rs->rs_rates[i];
4317 	}
4318 
4319 	return 0;
4320 }
4321 
4322 static int
4323 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4324 {
4325 	int i;
4326 
4327 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4328 		if (iwm_rates[i].rate == rate)
4329 			return i;
4330 	}
4331 
4332 	device_printf(sc->sc_dev,
4333 	    "%s: WARNING: device rate for %u not found!\n",
4334 	    __func__, rate);
4335 
4336 	return -1;
4337 }
4338 
4339 
4340 static void
4341 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4342 {
4343 	struct ieee80211_node *ni = &in->in_ni;
4344 	struct iwm_lq_cmd *lq = &in->in_lq;
4345 	struct ieee80211_rateset *rs = &ni->ni_rates;
4346 	int nrates = rs->rs_nrates;
4347 	int i, ridx, tab = 0;
4348 //	int txant = 0;
4349 
4350 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4351 
4352 	if (nrates > nitems(lq->rs_table)) {
4353 		device_printf(sc->sc_dev,
4354 		    "%s: node supports %d rates, driver handles "
4355 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4356 		return;
4357 	}
4358 	if (nrates == 0) {
4359 		device_printf(sc->sc_dev,
4360 		    "%s: node supports 0 rates, odd!\n", __func__);
4361 		return;
4362 	}
4363 	nrates = imin(rix + 1, nrates);
4364 
4365 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4366 	    "%s: nrates=%d\n", __func__, nrates);
4367 
4368 	/* then construct a lq_cmd based on those */
4369 	memset(lq, 0, sizeof(*lq));
4370 	lq->sta_id = IWM_STATION_ID;
4371 
4372 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4373 	if (ni->ni_flags & IEEE80211_NODE_HT)
4374 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4375 
4376 	/*
4377 	 * Are these used? (We don't do SISO or MIMO.)  They need to
4378 	 * be set to non-zero, though, or we get an error.
4379 	 */
4380 	lq->single_stream_ant_msk = 1;
4381 	lq->dual_stream_ant_msk = 1;
4382 
4383 	/*
4384 	 * Build the actual rate selection table.
4385 	 * The lowest bits are the rates.  Additionally,
4386 	 * CCK needs bit 9 to be set.  The rest of the bits
4387 	 * we add to the table select the tx antenna.
4388 	 * Note that we add the rates in descending order,
4389 	 * highest rate first (opposite of ni_rates).
4390 	 */
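	/*
	 * E.g. if rix selects 24 Mb/s in a typical 11g rate set,
	 * rs_table[0] encodes 24M, rs_table[1] 18M, and so on down
	 * to 1M; the lowest rate then pads the remaining entries.
	 */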
4391 	for (i = 0; i < nrates; i++) {
4392 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4393 		int nextant;
4394 
4395 		/* Map 802.11 rate to HW rate index. */
4396 		ridx = iwm_rate2ridx(sc, rate);
4397 		if (ridx == -1)
4398 			continue;
4399 
4400 #if 0
4401 		if (txant == 0)
4402 			txant = iwm_get_valid_tx_ant(sc);
4403 		nextant = 1<<(ffs(txant)-1);
4404 		txant &= ~nextant;
4405 #else
4406 		nextant = iwm_get_valid_tx_ant(sc);
4407 #endif
4408 		tab = iwm_rates[ridx].plcp;
4409 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4410 		if (IWM_RIDX_IS_CCK(ridx))
4411 			tab |= IWM_RATE_MCS_CCK_MSK;
4412 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4413 		    "station rate i=%d, rate=%d, hw=%x\n",
4414 		    i, iwm_rates[ridx].rate, tab);
4415 		lq->rs_table[i] = htole32(tab);
4416 	}
4417 	/* then fill the rest with the lowest possible rate */
4418 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4419 		KASSERT(tab != 0, ("invalid tab"));
4420 		lq->rs_table[i] = htole32(tab);
4421 	}
4422 }
4423 
4424 static void
4425 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4426 {
4427 	struct iwm_vap *ivp = IWM_VAP(vap);
4428 	int error;
4429 
4430 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4431 	sc->sc_tx_timer = 0;
4432 
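	/*
	 * Tear down firmware state in the reverse order it was built
	 * up in iwm_auth(): state 3 means the station was added, 2
	 * that the binding exists, 1 that the MAC context was added.
	 */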
4433 	ivp->iv_auth = 0;
4434 	if (sc->sc_firmware_state == 3) {
4435 		iwm_xmit_queue_drain(sc);
4436 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4437 		error = iwm_rm_sta(sc, vap, TRUE);
4438 		if (error) {
4439 			device_printf(sc->sc_dev,
4440 			    "%s: Failed to remove station: %d\n",
4441 			    __func__, error);
4442 		}
4443 	}
4444 	if (sc->sc_firmware_state == 3) {
4445 		error = iwm_mac_ctxt_changed(sc, vap);
4446 		if (error) {
4447 			device_printf(sc->sc_dev,
4448 			    "%s: Failed to change mac context: %d\n",
4449 			    __func__, error);
4450 		}
4451 	}
4452 	if (sc->sc_firmware_state == 3) {
4453 		error = iwm_sf_update(sc, vap, FALSE);
4454 		if (error) {
4455 			device_printf(sc->sc_dev,
4456 			    "%s: Failed to update smart FIFO: %d\n",
4457 			    __func__, error);
4458 		}
4459 	}
4460 	if (sc->sc_firmware_state == 3) {
4461 		error = iwm_rm_sta_id(sc, vap);
4462 		if (error) {
4463 			device_printf(sc->sc_dev,
4464 			    "%s: Failed to remove station id: %d\n",
4465 			    __func__, error);
4466 		}
4467 	}
4468 	if (sc->sc_firmware_state == 3) {
4469 		error = iwm_update_quotas(sc, NULL);
4470 		if (error) {
4471 			device_printf(sc->sc_dev,
4472 			    "%s: Failed to update PHY quota: %d\n",
4473 			    __func__, error);
4474 		}
4475 	}
4476 	if (sc->sc_firmware_state == 3) {
4477 		/* XXX Might need to specify bssid correctly. */
4478 		error = iwm_mac_ctxt_changed(sc, vap);
4479 		if (error) {
4480 			device_printf(sc->sc_dev,
4481 			    "%s: Failed to change mac context: %d\n",
4482 			    __func__, error);
4483 		}
4484 	}
4485 	if (sc->sc_firmware_state == 3) {
4486 		sc->sc_firmware_state = 2;
4487 	}
4488 	if (sc->sc_firmware_state > 1) {
4489 		error = iwm_binding_remove_vif(sc, ivp);
4490 		if (error) {
4491 			device_printf(sc->sc_dev,
4492 			    "%s: Failed to remove channel ctx: %d\n",
4493 			    __func__, error);
4494 		}
4495 	}
4496 	if (sc->sc_firmware_state > 1) {
4497 		sc->sc_firmware_state = 1;
4498 	}
4499 	ivp->phy_ctxt = NULL;
4500 	if (sc->sc_firmware_state > 0) {
4501 		error = iwm_mac_ctxt_changed(sc, vap);
4502 		if (error) {
4503 			device_printf(sc->sc_dev,
4504 			    "%s: Failed to change mac context: %d\n",
4505 			    __func__, error);
4506 		}
4507 	}
4508 	if (sc->sc_firmware_state > 0) {
4509 		error = iwm_power_update_mac(sc);
4510 		if (error != 0) {
4511 			device_printf(sc->sc_dev,
4512 			    "%s: failed to update power management\n",
4513 			    __func__);
4514 		}
4515 	}
4516 	sc->sc_firmware_state = 0;
4517 }
4518 
4519 static int
4520 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4521 {
4522 	struct iwm_vap *ivp = IWM_VAP(vap);
4523 	struct ieee80211com *ic = vap->iv_ic;
4524 	struct iwm_softc *sc = ic->ic_softc;
4525 	struct iwm_node *in;
4526 	int error;
4527 
4528 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4529 	    "switching state %s -> %s arg=0x%x\n",
4530 	    ieee80211_state_name[vap->iv_state],
4531 	    ieee80211_state_name[nstate],
4532 	    arg);
4533 
4534 	IEEE80211_UNLOCK(ic);
4535 	IWM_LOCK(sc);
4536 
4537 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4538 	    (nstate == IEEE80211_S_AUTH ||
4539 	     nstate == IEEE80211_S_ASSOC ||
4540 	     nstate == IEEE80211_S_RUN)) {
4541 		/* Stop blinking for a scan, when authenticating. */
4542 		iwm_led_blink_stop(sc);
4543 	}
4544 
4545 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4546 		iwm_led_disable(sc);
4547 		/* disable beacon filtering if we're hopping out of RUN */
4548 		iwm_disable_beacon_filter(sc);
4549 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4550 			in->in_assoc = 0;
4551 	}
4552 
4553 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4554 	     vap->iv_state == IEEE80211_S_ASSOC ||
4555 	     vap->iv_state == IEEE80211_S_RUN) &&
4556 	    (nstate == IEEE80211_S_INIT ||
4557 	     nstate == IEEE80211_S_SCAN ||
4558 	     nstate == IEEE80211_S_AUTH)) {
4559 		iwm_stop_session_protection(sc, ivp);
4560 	}
4561 
4562 	if ((vap->iv_state == IEEE80211_S_RUN ||
4563 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4564 	    nstate == IEEE80211_S_INIT) {
4565 		/*
4566 		 * In this case, iv_newstate() wants to send an 80211 frame on
4567 		 * the network that we are leaving. So we need to call it,
4568 		 * before tearing down all the firmware state.
4569 		 */
4570 		IWM_UNLOCK(sc);
4571 		IEEE80211_LOCK(ic);
4572 		ivp->iv_newstate(vap, nstate, arg);
4573 		IEEE80211_UNLOCK(ic);
4574 		IWM_LOCK(sc);
4575 		iwm_bring_down_firmware(sc, vap);
4576 		IWM_UNLOCK(sc);
4577 		IEEE80211_LOCK(ic);
4578 		return 0;
4579 	}
4580 
4581 	switch (nstate) {
4582 	case IEEE80211_S_INIT:
4583 	case IEEE80211_S_SCAN:
4584 		break;
4585 
4586 	case IEEE80211_S_AUTH:
4587 		iwm_bring_down_firmware(sc, vap);
4588 		if ((error = iwm_auth(vap, sc)) != 0) {
4589 			device_printf(sc->sc_dev,
4590 			    "%s: could not move to auth state: %d\n",
4591 			    __func__, error);
4592 			iwm_bring_down_firmware(sc, vap);
4593 			IWM_UNLOCK(sc);
4594 			IEEE80211_LOCK(ic);
4595 			return 1;
4596 		}
4597 		break;
4598 
4599 	case IEEE80211_S_ASSOC:
4600 		/*
4601 		 * EBS may be disabled due to previous failures reported by FW.
4602 		 * Reset EBS status here assuming environment has been changed.
4603 		 */
4604 		sc->last_ebs_successful = TRUE;
4605 		break;
4606 
4607 	case IEEE80211_S_RUN:
4608 		in = IWM_NODE(vap->iv_bss);
4609 		/* Update the association state, now that we have it all */
4610 		/* (e.g. the associd comes in at this point). */
4611 		error = iwm_update_sta(sc, in);
4612 		if (error != 0) {
4613 			device_printf(sc->sc_dev,
4614 			    "%s: failed to update STA\n", __func__);
4615 			IWM_UNLOCK(sc);
4616 			IEEE80211_LOCK(ic);
4617 			return error;
4618 		}
4619 		in->in_assoc = 1;
4620 		error = iwm_mac_ctxt_changed(sc, vap);
4621 		if (error != 0) {
4622 			device_printf(sc->sc_dev,
4623 			    "%s: failed to update MAC: %d\n", __func__, error);
4624 		}
4625 
4626 		iwm_sf_update(sc, vap, FALSE);
4627 		iwm_enable_beacon_filter(sc, ivp);
4628 		iwm_power_update_mac(sc);
4629 		iwm_update_quotas(sc, ivp);
4630 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4631 		iwm_setrates(sc, in, rix);
4632 
4633 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4634 			device_printf(sc->sc_dev,
4635 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4636 		}
4637 
4638 		iwm_led_enable(sc);
4639 		break;
4640 
4641 	default:
4642 		break;
4643 	}
4644 	IWM_UNLOCK(sc);
4645 	IEEE80211_LOCK(ic);
4646 
4647 	return (ivp->iv_newstate(vap, nstate, arg));
4648 }
4649 
4650 void
4651 iwm_endscan_cb(void *arg, int pending)
4652 {
4653 	struct iwm_softc *sc = arg;
4654 	struct ieee80211com *ic = &sc->sc_ic;
4655 
4656 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4657 	    "%s: scan ended\n",
4658 	    __func__);
4659 
4660 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4661 }
4662 
4663 static int
4664 iwm_send_bt_init_conf(struct iwm_softc *sc)
4665 {
4666 	struct iwm_bt_coex_cmd bt_cmd;
4667 
4668 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4669 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4670 
4671 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4672 	    &bt_cmd);
4673 }
4674 
4675 static boolean_t
4676 iwm_is_lar_supported(struct iwm_softc *sc)
4677 {
4678 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4679 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4680 
4681 	if (iwm_lar_disable)
4682 		return FALSE;
4683 
4684 	/*
4685 	 * Enable LAR only if it is supported by the FW (TLV) &&
4686 	 * enabled in the NVM
4687 	 */
4688 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4689 		return nvm_lar && tlv_lar;
4690 	else
4691 		return tlv_lar;
4692 }
4693 
4694 static boolean_t
4695 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4696 {
4697 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4698 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4699 }
4700 
4701 static int
4702 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4703 {
4704 	struct iwm_mcc_update_cmd mcc_cmd;
4705 	struct iwm_host_cmd hcmd = {
4706 		.id = IWM_MCC_UPDATE_CMD,
4707 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4708 		.data = { &mcc_cmd },
4709 	};
4710 	int ret;
4711 #ifdef IWM_DEBUG
4712 	struct iwm_rx_packet *pkt;
4713 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4714 	struct iwm_mcc_update_resp *mcc_resp;
4715 	int n_channels;
4716 	uint16_t mcc;
4717 #endif
4718 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4719 
4720 	if (!iwm_is_lar_supported(sc)) {
4721 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4722 		    __func__);
4723 		return 0;
4724 	}
4725 
4726 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4727 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
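	/* E.g. "ZZ", as sent from iwm_init_hw(), encodes as 0x5a5a. */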
4728 	if (iwm_is_wifi_mcc_supported(sc))
4729 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4730 	else
4731 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4732 
4733 	if (resp_v2)
4734 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4735 	else
4736 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4737 
4738 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4739 	    "send MCC update to FW with '%c%c' src = %d\n",
4740 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4741 
4742 	ret = iwm_send_cmd(sc, &hcmd);
4743 	if (ret)
4744 		return ret;
4745 
4746 #ifdef IWM_DEBUG
4747 	pkt = hcmd.resp_pkt;
4748 
4749 	/* Extract MCC response */
4750 	if (resp_v2) {
4751 		mcc_resp = (void *)pkt->data;
4752 		mcc = mcc_resp->mcc;
4753 		n_channels =  le32toh(mcc_resp->n_channels);
4754 	} else {
4755 		mcc_resp_v1 = (void *)pkt->data;
4756 		mcc = mcc_resp_v1->mcc;
4757 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4758 	}
4759 
4760 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4761 	if (mcc == 0)
4762 		mcc = 0x3030;  /* "00" - world */
4763 
4764 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4765 	    "regulatory domain '%c%c' (%d channels available)\n",
4766 	    mcc >> 8, mcc & 0xff, n_channels);
4767 #endif
4768 	iwm_free_resp(sc, &hcmd);
4769 
4770 	return 0;
4771 }
4772 
4773 static void
4774 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4775 {
4776 	struct iwm_host_cmd cmd = {
4777 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4778 		.len = { sizeof(uint32_t), },
4779 		.data = { &backoff, },
4780 	};
4781 
4782 	if (iwm_send_cmd(sc, &cmd) != 0) {
4783 		device_printf(sc->sc_dev,
4784 		    "failed to change thermal tx backoff\n");
4785 	}
4786 }
4787 
4788 static int
4789 iwm_init_hw(struct iwm_softc *sc)
4790 {
4791 	struct ieee80211com *ic = &sc->sc_ic;
4792 	int error, i, ac;
4793 
4794 	sc->sf_state = IWM_SF_UNINIT;
4795 
4796 	if ((error = iwm_start_hw(sc)) != 0) {
4797 		printf("iwm_start_hw: failed %d\n", error);
4798 		return error;
4799 	}
4800 
4801 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4802 		printf("iwm_run_init_ucode: failed %d\n", error);
4803 		return error;
4804 	}
4805 
4806 	/*
4807 	 * Stop and restart the HW, since the INIT firmware
4808 	 * image has just been loaded.
4809 	 */
4810 	iwm_stop_device(sc);
4811 	sc->sc_ps_disabled = FALSE;
4812 	if ((error = iwm_start_hw(sc)) != 0) {
4813 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4814 		return error;
4815 	}
4816 
4817 	/* Restart, this time with the regular firmware. */
4818 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4819 	if (error) {
4820 		device_printf(sc->sc_dev, "could not load firmware\n");
4821 		goto error;
4822 	}
4823 
4824 	error = iwm_sf_update(sc, NULL, FALSE);
4825 	if (error)
4826 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4827 
4828 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4829 		device_printf(sc->sc_dev, "bt init conf failed\n");
4830 		goto error;
4831 	}
4832 
4833 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4834 	if (error != 0) {
4835 		device_printf(sc->sc_dev, "antenna config failed\n");
4836 		goto error;
4837 	}
4838 
4839 	/* Send phy db control command and then phy db calibration */
4840 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4841 		goto error;
4842 
4843 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4844 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4845 		goto error;
4846 	}
4847 
4848 	/* Add auxiliary station for scanning */
4849 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4850 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4851 		goto error;
4852 	}
4853 
4854 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4855 		/*
4856 		 * The channel used here isn't relevant as it's
4857 		 * going to be overwritten in the other flows.
4858 		 * For now use the first channel we have.
4859 		 */
4860 		if ((error = iwm_phy_ctxt_add(sc,
4861 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4862 			goto error;
4863 	}
4864 
4865 	/* Initialize tx backoffs to the minimum. */
4866 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4867 		iwm_tt_tx_backoff(sc, 0);
4868 
4869 	if (iwm_config_ltr(sc) != 0)
4870 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4871 
4872 	error = iwm_power_update_device(sc);
4873 	if (error)
4874 		goto error;
4875 
4876 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4877 		goto error;
4878 
4879 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4880 		if ((error = iwm_config_umac_scan(sc)) != 0)
4881 			goto error;
4882 	}
4883 
4884 	/* Enable Tx queues. */
4885 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4886 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4887 		    iwm_ac_to_tx_fifo[ac]);
4888 		if (error)
4889 			goto error;
4890 	}
4891 
4892 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4893 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4894 		goto error;
4895 	}
4896 
4897 	return 0;
4898 
4899  error:
4900 	iwm_stop_device(sc);
4901 	return error;
4902 }
4903 
4904 /* Allow multicast from our BSSID. */
4905 static int
4906 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4907 {
4908 	struct ieee80211_node *ni = vap->iv_bss;
4909 	struct iwm_mcast_filter_cmd *cmd;
4910 	size_t size;
4911 	int error;
4912 
4913 	size = roundup(sizeof(*cmd), 4);
4914 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4915 	if (cmd == NULL)
4916 		return ENOMEM;
4917 	cmd->filter_own = 1;
4918 	cmd->port_id = 0;
4919 	cmd->count = 0;
4920 	cmd->pass_all = 1;
4921 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4922 
4923 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4924 	    IWM_CMD_SYNC, size, cmd);
4925 	free(cmd, M_DEVBUF);
4926 
4927 	return (error);
4928 }
4929 
4930 /*
4931  * ifnet interfaces
4932  */
4933 
4934 static void
4935 iwm_init(struct iwm_softc *sc)
4936 {
4937 	int error;
4938 
4939 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4940 		return;
4941 	}
4942 	sc->sc_generation++;
4943 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4944 
4945 	if ((error = iwm_init_hw(sc)) != 0) {
4946 		printf("iwm_init_hw failed %d\n", error);
4947 		iwm_stop(sc);
4948 		return;
4949 	}
4950 
4951 	/*
4952 	 * Ok, firmware loaded and we are jogging
4953 	 */
4954 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4955 }
4956 
4957 static int
4958 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4959 {
4960 	struct iwm_softc *sc;
4961 	int error;
4962 
4963 	sc = ic->ic_softc;
4964 
4965 	IWM_LOCK(sc);
4966 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4967 		IWM_UNLOCK(sc);
4968 		return (ENXIO);
4969 	}
4970 	error = mbufq_enqueue(&sc->sc_snd, m);
4971 	if (error) {
4972 		IWM_UNLOCK(sc);
4973 		return (error);
4974 	}
4975 	iwm_start(sc);
4976 	IWM_UNLOCK(sc);
4977 	return (0);
4978 }
4979 
4980 /*
4981  * Dequeue packets from sendq and call send.
4982  */
4983 static void
4984 iwm_start(struct iwm_softc *sc)
4985 {
4986 	struct ieee80211_node *ni;
4987 	struct mbuf *m;
4988 	int ac = 0;
4989 
4990 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4991 	while (sc->qfullmsk == 0 &&
4992 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4993 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4994 		if (iwm_tx(sc, m, ni, ac) != 0) {
4995 			if_inc_counter(ni->ni_vap->iv_ifp,
4996 			    IFCOUNTER_OERRORS, 1);
4997 			ieee80211_free_node(ni);
4998 			continue;
4999 		}
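		/*
		 * Arm the watchdog: it ticks once per second and
		 * declares a device timeout if sc_tx_timer (zeroed
		 * again by TX completions) counts down to zero.
		 */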
5000 		if (sc->sc_tx_timer == 0) {
5001 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
5002 			    sc);
5003 		}
5004 		sc->sc_tx_timer = 15;
5005 	}
5006 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5007 }
5008 
5009 static void
5010 iwm_stop(struct iwm_softc *sc)
5011 {
5012 
5013 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5014 	sc->sc_flags |= IWM_FLAG_STOPPED;
5015 	sc->sc_generation++;
5016 	iwm_led_blink_stop(sc);
5017 	sc->sc_tx_timer = 0;
5018 	iwm_stop_device(sc);
5019 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5020 }
5021 
5022 static void
5023 iwm_watchdog(void *arg)
5024 {
5025 	struct iwm_softc *sc = arg;
5026 	struct ieee80211com *ic = &sc->sc_ic;
5027 
5028 	if (sc->sc_attached == 0)
5029 		return;
5030 
5031 	if (sc->sc_tx_timer > 0) {
5032 		if (--sc->sc_tx_timer == 0) {
5033 			device_printf(sc->sc_dev, "device timeout\n");
5034 #ifdef IWM_DEBUG
5035 			iwm_nic_error(sc);
5036 #endif
5037 			ieee80211_restart_all(ic);
5038 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5039 			return;
5040 		}
5041 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5042 	}
5043 }
5044 
5045 static void
5046 iwm_parent(struct ieee80211com *ic)
5047 {
5048 	struct iwm_softc *sc = ic->ic_softc;
5049 	int startall = 0;
5050 	int rfkill = 0;
5051 
5052 	IWM_LOCK(sc);
5053 	if (ic->ic_nrunning > 0) {
5054 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5055 			iwm_init(sc);
5056 			rfkill = iwm_check_rfkill(sc);
5057 			if (!rfkill)
5058 				startall = 1;
5059 		}
5060 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5061 		iwm_stop(sc);
5062 	IWM_UNLOCK(sc);
5063 	if (startall)
5064 		ieee80211_start_all(ic);
5065 	else if (rfkill)
5066 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5067 }
5068 
5069 static void
5070 iwm_rftoggle_task(void *arg, int npending __unused)
5071 {
5072 	struct iwm_softc *sc = arg;
5073 	struct ieee80211com *ic = &sc->sc_ic;
5074 	int rfkill;
5075 
5076 	IWM_LOCK(sc);
5077 	rfkill = iwm_check_rfkill(sc);
5078 	IWM_UNLOCK(sc);
5079 	if (rfkill) {
5080 		device_printf(sc->sc_dev,
5081 		    "%s: rfkill switch, disabling interface\n", __func__);
5082 		ieee80211_suspend_all(ic);
5083 		ieee80211_notify_radio(ic, 0);
5084 	} else {
5085 		device_printf(sc->sc_dev,
5086 		    "%s: rfkill cleared, re-enabling interface\n", __func__);
5087 		ieee80211_resume_all(ic);
5088 		ieee80211_notify_radio(ic, 1);
5089 	}
5090 }
5091 
5092 /*
5093  * The interrupt side of things
5094  */
5095 
5096 /*
5097  * error dumping routines are from iwlwifi/mvm/utils.c
5098  */
5099 
5100 /*
5101  * Note: This structure is read from the device with IO accesses,
5102  * and the reading already does the endian conversion. As it is
5103  * read with uint32_t-sized accesses, any members with a different size
5104  * need to be ordered correctly though!
5105  */
5106 struct iwm_error_event_table {
5107 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5108 	uint32_t error_id;		/* type of error */
5109 	uint32_t trm_hw_status0;	/* TRM HW status */
5110 	uint32_t trm_hw_status1;	/* TRM HW status */
5111 	uint32_t blink2;		/* branch link */
5112 	uint32_t ilink1;		/* interrupt link */
5113 	uint32_t ilink2;		/* interrupt link */
5114 	uint32_t data1;		/* error-specific data */
5115 	uint32_t data2;		/* error-specific data */
5116 	uint32_t data3;		/* error-specific data */
5117 	uint32_t bcon_time;		/* beacon timer */
5118 	uint32_t tsf_low;		/* network timestamp function timer */
5119 	uint32_t tsf_hi;		/* network timestamp function timer */
5120 	uint32_t gp1;		/* GP1 timer register */
5121 	uint32_t gp2;		/* GP2 timer register */
5122 	uint32_t fw_rev_type;	/* firmware revision type */
5123 	uint32_t major;		/* uCode version major */
5124 	uint32_t minor;		/* uCode version minor */
5125 	uint32_t hw_ver;		/* HW Silicon version */
5126 	uint32_t brd_ver;		/* HW board version */
5127 	uint32_t log_pc;		/* log program counter */
5128 	uint32_t frame_ptr;		/* frame pointer */
5129 	uint32_t stack_ptr;		/* stack pointer */
5130 	uint32_t hcmd;		/* last host command header */
5131 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5132 				 * rxtx_flag */
5133 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5134 				 * host_flag */
5135 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5136 				 * enc_flag */
5137 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5138 				 * time_flag */
5139 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5140 				 * wico interrupt */
5141 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5142 	uint32_t wait_event;		/* wait event() caller address */
5143 	uint32_t l2p_control;	/* L2pControlField */
5144 	uint32_t l2p_duration;	/* L2pDurationField */
5145 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5146 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5147 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5148 				 * (LMPM_PMG_SEL) */
5149 	uint32_t u_timestamp;	/* indicates the date and time of
5150 				 * compilation */
5151 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5152 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5153 
5154 /*
5155  * UMAC error struct - relevant starting from family 8000 chip.
5156  * Note: This structure is read from the device with IO accesses,
5157  * and the reading already does the endian conversion. As it is
5158  * read with u32-sized accesses, any members with a different size
5159  * need to be ordered correctly though!
5160  */
5161 struct iwm_umac_error_event_table {
5162 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5163 	uint32_t error_id;	/* type of error */
5164 	uint32_t blink1;	/* branch link */
5165 	uint32_t blink2;	/* branch link */
5166 	uint32_t ilink1;	/* interrupt link */
5167 	uint32_t ilink2;	/* interrupt link */
5168 	uint32_t data1;		/* error-specific data */
5169 	uint32_t data2;		/* error-specific data */
5170 	uint32_t data3;		/* error-specific data */
5171 	uint32_t umac_major;
5172 	uint32_t umac_minor;
5173 	uint32_t frame_pointer;	/* core register 27*/
5174 	uint32_t stack_pointer;	/* core register 28 */
5175 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5176 	uint32_t nic_isr_pref;	/* ISR status register */
5177 } __packed;
5178 
5179 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5180 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5181 
5182 #ifdef IWM_DEBUG
5183 struct {
5184 	const char *name;
5185 	uint8_t num;
5186 } advanced_lookup[] = {
5187 	{ "NMI_INTERRUPT_WDG", 0x34 },
5188 	{ "SYSASSERT", 0x35 },
5189 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5190 	{ "BAD_COMMAND", 0x38 },
5191 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5192 	{ "FATAL_ERROR", 0x3D },
5193 	{ "NMI_TRM_HW_ERR", 0x46 },
5194 	{ "NMI_INTERRUPT_TRM", 0x4C },
5195 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5196 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5197 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5198 	{ "NMI_INTERRUPT_HOST", 0x66 },
5199 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5200 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5201 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5202 	{ "ADVANCED_SYSASSERT", 0 },
5203 };
5204 
5205 static const char *
5206 iwm_desc_lookup(uint32_t num)
5207 {
5208 	int i;
5209 
5210 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5211 		if (advanced_lookup[i].num == num)
5212 			return advanced_lookup[i].name;
5213 
5214 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5215 	return advanced_lookup[i].name;
5216 }
5217 
5218 static void
5219 iwm_nic_umac_error(struct iwm_softc *sc)
5220 {
5221 	struct iwm_umac_error_event_table table;
5222 	uint32_t base;
5223 
5224 	base = sc->umac_error_event_table;
5225 
5226 	if (base < 0x800000) {
5227 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5228 		    base);
5229 		return;
5230 	}
5231 
5232 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5233 		device_printf(sc->sc_dev, "reading errlog failed\n");
5234 		return;
5235 	}
5236 
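	/*
	 * table.valid counts log entries; with ERROR_START_OFFSET being a
	 * single word, this test is simply "table.valid != 0".
	 */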
5237 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5238 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5239 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5240 		    sc->sc_flags, table.valid);
5241 	}
5242 
5243 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5244 		iwm_desc_lookup(table.error_id));
5245 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5246 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5247 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5248 	    table.ilink1);
5249 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5250 	    table.ilink2);
5251 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5252 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5253 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5254 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5255 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5256 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5257 	    table.frame_pointer);
5258 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5259 	    table.stack_pointer);
5260 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5261 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5262 	    table.nic_isr_pref);
5263 }
5264 
5265 /*
5266  * Support for dumping the error log seemed like a good idea ...
5267  * but it's mostly hex junk and the only sensible thing is the
5268  * hw/ucode revision (which we know anyway).  Since it's here,
5269  * I'll just leave it in, just in case e.g. the Intel guys want to
5270  * help us decipher some "ADVANCED_SYSASSERT" later.
5271  */
5272 static void
5273 iwm_nic_error(struct iwm_softc *sc)
5274 {
5275 	struct iwm_error_event_table table;
5276 	uint32_t base;
5277 
5278 	device_printf(sc->sc_dev, "dumping device error log\n");
5279 	base = sc->error_event_table[0];
5280 	if (base < 0x800000) {
5281 		device_printf(sc->sc_dev,
5282 		    "Invalid error log pointer 0x%08x\n", base);
5283 		return;
5284 	}
5285 
5286 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5287 		device_printf(sc->sc_dev, "reading errlog failed\n");
5288 		return;
5289 	}
5290 
5291 	if (!table.valid) {
5292 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5293 		return;
5294 	}
5295 
5296 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5297 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5298 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5299 		    sc->sc_flags, table.valid);
5300 	}
5301 
5302 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5303 	    iwm_desc_lookup(table.error_id));
5304 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5305 	    table.trm_hw_status0);
5306 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5307 	    table.trm_hw_status1);
5308 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5309 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5310 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5311 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5312 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5313 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5314 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5315 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5316 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5317 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5318 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5319 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5320 	    table.fw_rev_type);
5321 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5322 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5323 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5324 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5325 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5326 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5327 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5328 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5329 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5330 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5331 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5332 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5333 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5334 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5335 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5336 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5337 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5338 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5339 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5340 
5341 	if (sc->umac_error_event_table)
5342 		iwm_nic_umac_error(sc);
5343 }
5344 #endif
5345 
5346 static void
5347 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5348 {
5349 	struct ieee80211com *ic = &sc->sc_ic;
5350 	struct iwm_cmd_response *cresp;
5351 	struct mbuf *m1;
5352 	uint32_t offset = 0;
5353 	uint32_t maxoff = IWM_RBUF_SIZE;
5354 	uint32_t nextoff;
5355 	boolean_t stolen = FALSE;
5356 
5357 #define HAVEROOM(a)	\
5358     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
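/*
 * HAVEROOM(a): true while offset 'a' leaves room for at least a status
 * word plus a command header before the end of the receive buffer.
 */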
5359 
5360 	while (HAVEROOM(offset)) {
5361 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5362 		    offset);
5363 		int qid, idx, code, len;
5364 
5365 		qid = pkt->hdr.qid;
5366 		idx = pkt->hdr.idx;
5367 
5368 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5369 
5370 		/*
5371 		 * randomly get these from the firmware, no idea why.
5372 		 * they at least seem harmless, so just ignore them for now
5373 		 */
5374 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5375 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5376 			break;
5377 		}
5378 
5379 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5380 		    "rx packet qid=%d idx=%d type=%x\n",
5381 		    qid & ~0x80, pkt->hdr.idx, code);
5382 
5383 		len = iwm_rx_packet_len(pkt);
5384 		len += sizeof(uint32_t); /* account for status word */
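		/*
		 * Frames within one RX buffer are aligned on
		 * IWM_FH_RSCSR_FRAME_ALIGN boundaries, so the next packet
		 * header (if any) starts at the rounded-up offset.
		 */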
5385 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5386 
5387 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5388 
5389 		switch (code) {
5390 		case IWM_REPLY_RX_PHY_CMD:
5391 			iwm_rx_rx_phy_cmd(sc, pkt);
5392 			break;
5393 
5394 		case IWM_REPLY_RX_MPDU_CMD: {
5395 			/*
5396 			 * If this is the last frame in the RX buffer, we
5397 			 * can directly feed the mbuf to the sharks here.
5398 			 */
5399 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5400 			    struct iwm_rx_packet *, nextoff);
5401 			if (!HAVEROOM(nextoff) ||
5402 			    (nextpkt->hdr.code == 0 &&
5403 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5404 			     nextpkt->hdr.idx == 0) ||
5405 			    (nextpkt->len_n_flags ==
5406 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5407 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5408 					stolen = FALSE;
5409 					/* Make sure we abort the loop */
5410 					nextoff = maxoff;
5411 				}
5412 				break;
5413 			}
5414 
5415 			/*
5416 			 * Use m_copym instead of m_split, because that
5417 			 * makes it easier to keep a valid rx buffer in
5418 			 * the ring, when iwm_rx_mpdu() fails.
5419 			 *
5420 			 * We need to start m_copym() at offset 0, to get the
5421 			 * M_PKTHDR flag preserved.
5422 			 */
5423 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5424 			if (m1) {
5425 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5426 					stolen = TRUE;
5427 				else
5428 					m_freem(m1);
5429 			}
5430 			break;
5431 		}
5432 
5433 		case IWM_TX_CMD:
5434 			iwm_rx_tx_cmd(sc, pkt);
5435 			break;
5436 
5437 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5438 			struct iwm_missed_beacons_notif *resp;
5439 			int missed;
5440 
5441 			/* XXX look at mac_id to determine interface ID */
5442 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5443 
5444 			resp = (void *)pkt->data;
5445 			missed = le32toh(resp->consec_missed_beacons);
5446 
5447 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5448 			    "%s: MISSED_BEACON: mac_id=%d, "
5449 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5450 			    "num_rx=%d\n",
5451 			    __func__,
5452 			    le32toh(resp->mac_id),
5453 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5454 			    le32toh(resp->consec_missed_beacons),
5455 			    le32toh(resp->num_expected_beacons),
5456 			    le32toh(resp->num_recvd_beacons));
5457 
5458 			/* Be paranoid */
5459 			if (vap == NULL)
5460 				break;
5461 
5462 			/* XXX no net80211 locking? */
5463 			if (vap->iv_state == IEEE80211_S_RUN &&
5464 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5465 				if (missed > vap->iv_bmissthreshold) {
5466 					/* XXX bad locking; turn into task */
5467 					IWM_UNLOCK(sc);
5468 					ieee80211_beacon_miss(ic);
5469 					IWM_LOCK(sc);
5470 				}
5471 			}
5472 
5473 			break;
5474 		}
5475 
5476 		case IWM_MFUART_LOAD_NOTIFICATION:
5477 			break;
5478 
5479 		case IWM_ALIVE:
5480 			break;
5481 
5482 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5483 			break;
5484 
5485 		case IWM_STATISTICS_NOTIFICATION:
5486 			iwm_handle_rx_statistics(sc, pkt);
5487 			break;
5488 
5489 		case IWM_NVM_ACCESS_CMD:
5490 		case IWM_MCC_UPDATE_CMD:
5491 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5492 				memcpy(sc->sc_cmd_resp,
5493 				    pkt, sizeof(sc->sc_cmd_resp));
5494 			}
5495 			break;
5496 
5497 		case IWM_MCC_CHUB_UPDATE_CMD: {
5498 			struct iwm_mcc_chub_notif *notif;
5499 			notif = (void *)pkt->data;
5500 
5501 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5502 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5503 			sc->sc_fw_mcc[2] = '\0';
5504 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5505 			    "fw source %d sent CC '%s'\n",
5506 			    notif->source_id, sc->sc_fw_mcc);
5507 			break;
5508 		}
5509 
5510 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5511 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5512 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5513 			struct iwm_dts_measurement_notif_v1 *notif;
5514 
5515 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5516 				device_printf(sc->sc_dev,
5517 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5518 				break;
5519 			}
5520 			notif = (void *)pkt->data;
5521 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5522 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5523 			    notif->temp);
5524 			break;
5525 		}
5526 
5527 		case IWM_PHY_CONFIGURATION_CMD:
5528 		case IWM_TX_ANT_CONFIGURATION_CMD:
5529 		case IWM_ADD_STA:
5530 		case IWM_MAC_CONTEXT_CMD:
5531 		case IWM_REPLY_SF_CFG_CMD:
5532 		case IWM_POWER_TABLE_CMD:
5533 		case IWM_LTR_CONFIG:
5534 		case IWM_PHY_CONTEXT_CMD:
5535 		case IWM_BINDING_CONTEXT_CMD:
5536 		case IWM_TIME_EVENT_CMD:
5537 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5538 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5539 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5540 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5541 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5542 		case IWM_REPLY_BEACON_FILTERING_CMD:
5543 		case IWM_MAC_PM_POWER_TABLE:
5544 		case IWM_TIME_QUOTA_CMD:
5545 		case IWM_REMOVE_STA:
5546 		case IWM_TXPATH_FLUSH:
5547 		case IWM_LQ_CMD:
5548 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5549 				 IWM_FW_PAGING_BLOCK_CMD):
5550 		case IWM_BT_CONFIG:
5551 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5552 			cresp = (void *)pkt->data;
5553 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5554 				memcpy(sc->sc_cmd_resp,
5555 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5556 			}
5557 			break;
5558 
5559 		/* ignore */
5560 		case IWM_PHY_DB_CMD:
5561 			break;
5562 
5563 		case IWM_INIT_COMPLETE_NOTIF:
5564 			break;
5565 
5566 		case IWM_SCAN_OFFLOAD_COMPLETE:
5567 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5568 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5569 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5570 				ieee80211_runtask(ic, &sc->sc_es_task);
5571 			}
5572 			break;
5573 
5574 		case IWM_SCAN_ITERATION_COMPLETE:
5575 			/* Payload is a struct iwm_lmac_scan_complete_notif;
5576 			 * nothing is done with it, so just ignore it. */
5577 			break;
5579 
5580 		case IWM_SCAN_COMPLETE_UMAC:
5581 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5582 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5583 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5584 				ieee80211_runtask(ic, &sc->sc_es_task);
5585 			}
5586 			break;
5587 
5588 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5589 			struct iwm_umac_scan_iter_complete_notif *notif;
5590 			notif = (void *)pkt->data;
5591 
5592 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5593 			    "complete, status=0x%x, %d channels scanned\n",
5594 			    notif->status, notif->scanned_channels);
5595 			break;
5596 		}
5597 
5598 		case IWM_REPLY_ERROR: {
5599 			struct iwm_error_resp *resp;
5600 			resp = (void *)pkt->data;
5601 
5602 			device_printf(sc->sc_dev,
5603 			    "firmware error 0x%x, cmd 0x%x\n",
5604 			    le32toh(resp->error_type),
5605 			    resp->cmd_id);
5606 			break;
5607 		}
5608 
5609 		case IWM_TIME_EVENT_NOTIFICATION:
5610 			iwm_rx_time_event_notif(sc, pkt);
5611 			break;
5612 
5613 		/*
5614 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5615 		 * messages. Just ignore them for now.
5616 		 */
5617 		case IWM_DEBUG_LOG_MSG:
5618 			break;
5619 
5620 		case IWM_MCAST_FILTER_CMD:
5621 			break;
5622 
5623 		case IWM_SCD_QUEUE_CFG: {
5624 			struct iwm_scd_txq_cfg_rsp *rsp;
5625 			rsp = (void *)pkt->data;
5626 
5627 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5628 			    "queue cfg token=0x%x sta_id=%d "
5629 			    "tid=%d scd_queue=%d\n",
5630 			    rsp->token, rsp->sta_id, rsp->tid,
5631 			    rsp->scd_queue);
5632 			break;
5633 		}
5634 
5635 		default:
5636 			device_printf(sc->sc_dev,
5637 			    "code %x, frame %d/%d %x unhandled\n",
5638 			    code, qid & ~0x80, idx, pkt->len_n_flags);
5639 			break;
5640 		}
5641 
5642 		/*
5643 		 * Why test bit 0x80?  The Linux driver:
5644 		 *
5645 		 * There is one exception:  uCode sets bit 15 when it
5646 		 * originates the response/notification, i.e. when the
5647 		 * response/notification is not a direct response to a
5648 		 * command sent by the driver.  For example, uCode issues
5649 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5650 		 * it is not a direct response to any driver command.
5651 		 *
5652 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5653 		 * uses a slightly different format for pkt->hdr, and "qid"
5654 		 * is actually the upper byte of a two-byte field.
5655 		 */
5656 		if (!(qid & (1 << 7)))
5657 			iwm_cmd_done(sc, pkt);
5658 
5659 		offset = nextoff;
5660 	}
5661 	if (stolen)
5662 		m_freem(m);
5663 #undef HAVEROOM
5664 }
5665 
5666 /*
5667  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5668  * Basic structure from if_iwn
5669  */
5670 static void
5671 iwm_notif_intr(struct iwm_softc *sc)
5672 {
5673 	int count;
5674 	uint32_t wreg;
5675 	uint16_t hw;
5676 
5677 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5678 	    BUS_DMASYNC_POSTREAD);
5679 
5680 	if (sc->cfg->mqrx_supported) {
5681 		count = IWM_RX_MQ_RING_COUNT;
5682 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5683 	} else {
5684 		count = IWM_RX_LEGACY_RING_COUNT;
5685 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5686 	}
5687 
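	/*
	 * closed_rb_num reports how far the firmware has advanced in the
	 * RX ring; only the low 12 bits are significant, hence the mask.
	 */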
5688 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5689 
5690 	/*
5691 	 * Process responses
5692 	 */
5693 	while (sc->rxq.cur != hw) {
5694 		struct iwm_rx_ring *ring = &sc->rxq;
5695 		struct iwm_rx_data *data = &ring->data[ring->cur];
5696 
5697 		bus_dmamap_sync(ring->data_dmat, data->map,
5698 		    BUS_DMASYNC_POSTREAD);
5699 
5700 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5701 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5702 		iwm_handle_rxb(sc, data->m);
5703 
5704 		ring->cur = (ring->cur + 1) % count;
5705 	}
5706 
5707 	/*
5708 	 * Tell the firmware that it can reuse the ring entries that
5709 	 * we have just processed.
5710 	 * Seems like the hardware gets upset unless we align
5711 	 * the write by 8??
5712 	 */
5713 	hw = (hw == 0) ? count - 1 : hw - 1;
5714 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5715 }
5716 
5717 static void
5718 iwm_intr(void *arg)
5719 {
5720 	struct iwm_softc *sc = arg;
5721 	int handled = 0;
5722 	int r1, r2;
5723 	int isperiodic = 0;
5724 
5725 	IWM_LOCK(sc);
5726 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5727 
5728 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5729 		uint32_t *ict = sc->ict_dma.vaddr;
5730 		uint32_t tmp;
5731 
5732 		tmp = le32toh(ict[sc->ict_cur]);
5733 		if (!tmp)
5734 			goto out_ena;
5735 
5736 		/*
5737 		 * ok, there was something.  keep plowing until we have all.
5738 		 */
5739 		r1 = r2 = 0;
5740 		while (tmp) {
5741 			r1 |= tmp;
5742 			ict[sc->ict_cur] = 0;
5743 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5744 			tmp = le32toh(ict[sc->ict_cur]);
5745 		}
5746 
5747 		/* this is where the fun begins.  don't ask */
5748 		if (r1 == 0xffffffff)
5749 			r1 = 0;
5750 
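		/*
		 * Per the Linux iwlwifi driver: the 0xc0000 check below works
		 * around a hardware bug that can clear the RX status bit;
		 * bits 18/19 stay set when that happens and are used to
		 * reconstruct it (0x8000).  The final line then expands the
		 * compressed ICT value into CSR_INT format: bits 0-7 keep
		 * their place and bits 8-15 move to bits 24-31.
		 */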
5751 		/* i am not expected to understand this */
5752 		if (r1 & 0xc0000)
5753 			r1 |= 0x8000;
5754 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5755 	} else {
5756 		r1 = IWM_READ(sc, IWM_CSR_INT);
5757 		/* "hardware gone" (where, fishing?) */
5758 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5759 			goto out;
5760 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5761 	}
5762 	if (r1 == 0 && r2 == 0) {
5763 		goto out_ena;
5764 	}
5765 
5766 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5767 
5768 	/* Safely ignore these bits for debug checks below */
5769 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5770 
5771 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5772 		int i;
5773 		struct ieee80211com *ic = &sc->sc_ic;
5774 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5775 
5776 #ifdef IWM_DEBUG
5777 		iwm_nic_error(sc);
5778 #endif
5779 		/* Dump driver status (TX and RX rings) while we're here. */
5780 		device_printf(sc->sc_dev, "driver status:\n");
5781 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5782 			struct iwm_tx_ring *ring = &sc->txq[i];
5783 			device_printf(sc->sc_dev,
5784 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5785 			    "queued=%-3d\n",
5786 			    i, ring->qid, ring->cur, ring->queued);
5787 		}
5788 		device_printf(sc->sc_dev,
5789 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5790 		device_printf(sc->sc_dev,
5791 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5792 
5793 		/* Reset our firmware state tracking. */
5794 		sc->sc_firmware_state = 0;
5795 		/* Don't stop the device; just do a VAP restart */
5796 		IWM_UNLOCK(sc);
5797 
5798 		if (vap == NULL) {
5799 			printf("%s: null vap\n", __func__);
5800 			return;
5801 		}
5802 
5803 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5804 		    "restarting\n", __func__, vap->iv_state);
5805 
5806 		ieee80211_restart_all(ic);
5807 		return;
5808 	}
5809 
5810 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5811 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5812 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5813 		iwm_stop(sc);
5815 		goto out;
5816 	}
5817 
5818 	/* firmware chunk loaded */
5819 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5820 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5821 		handled |= IWM_CSR_INT_BIT_FH_TX;
5822 		sc->sc_fw_chunk_done = 1;
5823 		wakeup(&sc->sc_fw);
5824 	}
5825 
5826 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5827 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5828 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5829 	}
5830 
5831 	/*
5832 	 * The Linux driver uses periodic interrupts to avoid races.
5833 	 * We cargo-cult like it's going out of fashion.
5834 	 */
5835 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5836 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5837 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5838 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5839 			IWM_WRITE_1(sc,
5840 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5841 		isperiodic = 1;
5842 	}
5843 
5844 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5845 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5846 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5847 
5848 		iwm_notif_intr(sc);
5849 
5850 		/* enable periodic interrupt, see above */
5851 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5852 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5853 			    IWM_CSR_INT_PERIODIC_ENA);
5854 	}
5855 
5856 	if (__predict_false(r1 & ~handled))
5857 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5858 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5860 
5861  out_ena:
5862 	iwm_restore_interrupts(sc);
5863  out:
5864 	IWM_UNLOCK(sc);
5865 	return;
5866 }
5867 
5868 /*
5869  * Autoconf glue-sniffing
5870  */
5871 #define	PCI_VENDOR_INTEL		0x8086
5872 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5873 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5874 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5875 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5876 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5877 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5878 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5879 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5880 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5881 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5882 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5883 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5884 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5885 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5886 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
5887 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5888 
5889 static const struct iwm_devices {
5890 	uint16_t		device;
5891 	const struct iwm_cfg	*cfg;
5892 } iwm_devices[] = {
5893 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5894 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5895 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5896 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5897 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5898 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5899 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5900 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5901 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5902 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5903 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5904 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5905 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5906 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5907 	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5908 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5909 };
5910 
5911 static int
5912 iwm_probe(device_t dev)
5913 {
5914 	int i;
5915 
5916 	for (i = 0; i < nitems(iwm_devices); i++) {
5917 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5918 		    pci_get_device(dev) == iwm_devices[i].device) {
5919 			device_set_desc(dev, iwm_devices[i].cfg->name);
5920 			return (BUS_PROBE_DEFAULT);
5921 		}
5922 	}
5923 
5924 	return (ENXIO);
5925 }
5926 
5927 static int
5928 iwm_dev_check(device_t dev)
5929 {
5930 	struct iwm_softc *sc;
5931 	uint16_t devid;
5932 	int i;
5933 
5934 	sc = device_get_softc(dev);
5935 
5936 	devid = pci_get_device(dev);
5937 	for (i = 0; i < nitems(iwm_devices); i++) {
5938 		if (iwm_devices[i].device == devid) {
5939 			sc->cfg = iwm_devices[i].cfg;
5940 			return (0);
5941 		}
5942 	}
5943 	device_printf(dev, "unknown adapter type\n");
5944 	return ENXIO;
5945 }
5946 
5947 /* PCI registers */
5948 #define PCI_CFG_RETRY_TIMEOUT	0x041
5949 
5950 static int
5951 iwm_pci_attach(device_t dev)
5952 {
5953 	struct iwm_softc *sc;
5954 	int count, error, rid;
5955 	uint16_t reg;
5956 
5957 	sc = device_get_softc(dev);
5958 
5959 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5960 	 * PCI Tx retries from interfering with C3 CPU state */
5961 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5962 
5963 	/* Enable bus-mastering and hardware bug workaround. */
5964 	pci_enable_busmaster(dev);
5965 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5966 	/* Clear any stray INTx state; only relevant when MSI is not in use. */
5967 	if (reg & PCIM_STATUS_INTxSTATE) {
5968 		reg &= ~PCIM_STATUS_INTxSTATE;
5969 	}
5970 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5971 
5972 	rid = PCIR_BAR(0);
5973 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5974 	    RF_ACTIVE);
5975 	if (sc->sc_mem == NULL) {
5976 		device_printf(sc->sc_dev, "can't map mem space\n");
5977 		return (ENXIO);
5978 	}
5979 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5980 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5981 
5982 	/* Install interrupt handler. */
5983 	count = 1;
5984 	rid = 0;
5985 	if (pci_alloc_msi(dev, &count) == 0)
5986 		rid = 1;
5987 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5988 	    (rid != 0 ? 0 : RF_SHAREABLE));
5989 	if (sc->sc_irq == NULL) {
5990 		device_printf(dev, "can't map interrupt\n");
5991 		return (ENXIO);
5992 	}
5993 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5994 	    NULL, iwm_intr, sc, &sc->sc_ih);
5995 	if (error != 0 || sc->sc_ih == NULL) {
5996 		device_printf(dev, "can't establish interrupt\n");
5997 		return (ENXIO);
5998 	}
5999 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6000 
6001 	return (0);
6002 }
6003 
6004 static void
6005 iwm_pci_detach(device_t dev)
6006 {
6007 	struct iwm_softc *sc = device_get_softc(dev);
6008 
6009 	if (sc->sc_irq != NULL) {
6010 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6011 		bus_release_resource(dev, SYS_RES_IRQ,
6012 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6013 		pci_release_msi(dev);
6014 	}
6015 	if (sc->sc_mem != NULL)
6016 		bus_release_resource(dev, SYS_RES_MEMORY,
6017 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6018 }
6019 
6020 static int
6021 iwm_attach(device_t dev)
6022 {
6023 	struct iwm_softc *sc = device_get_softc(dev);
6024 	struct ieee80211com *ic = &sc->sc_ic;
6025 	int error;
6026 	int txq_i, i;
6027 
6028 	sc->sc_dev = dev;
6029 	sc->sc_attached = 1;
6030 	IWM_LOCK_INIT(sc);
6031 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6032 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6033 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
6034 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6035 	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
6036 
6037 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
6038 	    taskqueue_thread_enqueue, &sc->sc_tq);
6039 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
6040 	if (error != 0) {
6041 		device_printf(dev, "can't start taskq thread, error %d\n",
6042 		    error);
6043 		goto fail;
6044 	}
6045 
6046 	error = iwm_dev_check(dev);
6047 	if (error != 0)
6048 		goto fail;
6049 
6050 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6051 	if (sc->sc_notif_wait == NULL) {
6052 		device_printf(dev, "failed to init notification wait struct\n");
6053 		goto fail;
6054 	}
6055 
6056 	sc->sf_state = IWM_SF_UNINIT;
6057 
6058 	/* Init phy db */
6059 	sc->sc_phy_db = iwm_phy_db_init(sc);
6060 	if (!sc->sc_phy_db) {
6061 		device_printf(dev, "Cannot init phy_db\n");
6062 		goto fail;
6063 	}
6064 
6065 	/* Set EBS as successful as long as not stated otherwise by the FW. */
6066 	sc->last_ebs_successful = TRUE;
6067 
6068 	/* PCI attach */
6069 	error = iwm_pci_attach(dev);
6070 	if (error != 0)
6071 		goto fail;
6072 
6073 	sc->sc_wantresp = -1;
6074 
6075 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6076 	/*
6077 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6078 	 * changed, and now the revision step also includes bit 0-1 (no more
6079 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6080 	 * in the old format.
6081 	 */
6082 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6083 		int ret;
6084 		uint32_t hw_step;
6085 
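		/*
		 * Move the new-style step bits (0-1) of CSR_HW_REV into the
		 * old "dash" position (bits 2-3).  This assumes the iwlwifi
		 * definition of IWM_CSR_HW_REV_STEP(), which extracts bits
		 * 2-3 of its argument.
		 */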
6086 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6087 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6088 
6089 		if (iwm_prepare_card_hw(sc) != 0) {
6090 			device_printf(dev, "could not initialize hardware\n");
6091 			goto fail;
6092 		}
6093 
6094 		/*
6095 		 * In order to recognize C step the driver should read the
6096 		 * chip version id located at the AUX bus MISC address.
6097 		 */
6098 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6099 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6100 		DELAY(2);
6101 
6102 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6103 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6104 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6105 				   25000);
6106 		if (!ret) {
6107 			device_printf(sc->sc_dev,
6108 			    "Failed to wake up the nic\n");
6109 			goto fail;
6110 		}
6111 
6112 		if (iwm_nic_lock(sc)) {
6113 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6114 			hw_step |= IWM_ENABLE_WFPM;
6115 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6116 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6117 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
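			/* A step value of 3 at this location identifies
			 * C-step silicon. */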
6118 			if (hw_step == 0x3)
6119 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6120 						(IWM_SILICON_C_STEP << 2);
6121 			iwm_nic_unlock(sc);
6122 		} else {
6123 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6124 			goto fail;
6125 		}
6126 	}
6127 
6128 	/* Special-case 7265D; it has the same PCI IDs as plain 7265. */
6129 	if (sc->cfg == &iwm7265_cfg &&
6130 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6131 		sc->cfg = &iwm7265d_cfg;
6132 	}
6133 
6134 	/* Allocate DMA memory for firmware transfers. */
6135 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6136 		device_printf(dev, "could not allocate memory for firmware\n");
6137 		goto fail;
6138 	}
6139 
6140 	/* Allocate "Keep Warm" page. */
6141 	if ((error = iwm_alloc_kw(sc)) != 0) {
6142 		device_printf(dev, "could not allocate keep warm page\n");
6143 		goto fail;
6144 	}
6145 
6146 	/* We use ICT interrupts */
6147 	if ((error = iwm_alloc_ict(sc)) != 0) {
6148 		device_printf(dev, "could not allocate ICT table\n");
6149 		goto fail;
6150 	}
6151 
6152 	/* Allocate TX scheduler "rings". */
6153 	if ((error = iwm_alloc_sched(sc)) != 0) {
6154 		device_printf(dev, "could not allocate TX scheduler rings\n");
6155 		goto fail;
6156 	}
6157 
6158 	/* Allocate TX rings */
6159 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6160 		if ((error = iwm_alloc_tx_ring(sc,
6161 		    &sc->txq[txq_i], txq_i)) != 0) {
6162 			device_printf(dev,
6163 			    "could not allocate TX ring %d\n",
6164 			    txq_i);
6165 			goto fail;
6166 		}
6167 	}
6168 
6169 	/* Allocate RX ring. */
6170 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6171 		device_printf(dev, "could not allocate RX ring\n");
6172 		goto fail;
6173 	}
6174 
6175 	/* Clear pending interrupts. */
6176 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6177 
6178 	ic->ic_softc = sc;
6179 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6180 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6181 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6182 
6183 	/* Set device capabilities. */
6184 	ic->ic_caps =
6185 	    IEEE80211_C_STA |
6186 	    IEEE80211_C_WPA |		/* WPA/RSN */
6187 	    IEEE80211_C_WME |
6188 	    IEEE80211_C_PMGT |
6189 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6190 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6191 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6192 	    ;
6193 	/* Advertise full-offload scanning */
6194 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6195 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6196 		sc->sc_phyctxt[i].id = i;
6197 		sc->sc_phyctxt[i].color = 0;
6198 		sc->sc_phyctxt[i].ref = 0;
6199 		sc->sc_phyctxt[i].channel = NULL;
6200 	}
6201 
6202 	/* Default noise floor */
6203 	sc->sc_noise = -96;
6204 
6205 	/* Max RSSI */
6206 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6207 
6208 #ifdef IWM_DEBUG
6209 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6210 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6211 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6212 #endif
6213 
6214 	error = iwm_read_firmware(sc);
6215 	if (error) {
6216 		goto fail;
6217 	} else if (sc->sc_fw.fw_fp == NULL) {
6218 		/*
6219 		 * XXX Add a solution for properly deferring firmware load
6220 		 *     during bootup.
6221 		 */
6222 		goto fail;
6223 	} else {
6224 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6225 		sc->sc_preinit_hook.ich_arg = sc;
6226 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6227 			device_printf(dev,
6228 			    "config_intrhook_establish failed\n");
6229 			goto fail;
6230 		}
6231 	}
6232 
6233 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6234 	    "<-%s\n", __func__);
6235 
6236 	return 0;
6237 
6238 	/* Free allocated memory if something failed during attachment. */
6239 fail:
6240 	iwm_detach_local(sc, 0);
6241 
6242 	return ENXIO;
6243 }
6244 
6245 static int
6246 iwm_is_valid_ether_addr(uint8_t *addr)
6247 {
6248 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6249 
6250 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6251 		return (FALSE);
6252 
6253 	return (TRUE);
6254 }
6255 
6256 static int
6257 iwm_wme_update(struct ieee80211com *ic)
6258 {
6259 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6260 	struct iwm_softc *sc = ic->ic_softc;
6261 	struct chanAccParams chp;
6262 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6263 	struct iwm_vap *ivp = IWM_VAP(vap);
6264 	struct iwm_node *in;
6265 	struct wmeParams tmp[WME_NUM_AC];
6266 	int aci, error;
6267 
6268 	if (vap == NULL)
6269 		return (0);
6270 
6271 	ieee80211_wme_ic_getparams(ic, &chp);
6272 
6273 	IEEE80211_LOCK(ic);
6274 	for (aci = 0; aci < WME_NUM_AC; aci++)
6275 		tmp[aci] = chp.cap_wmeParams[aci];
6276 	IEEE80211_UNLOCK(ic);
6277 
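	/*
	 * The WME parameters were snapshotted under the net80211 lock above;
	 * apply them under the driver lock so that both locks are never held
	 * at the same time.
	 */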
6278 	IWM_LOCK(sc);
6279 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6280 		const struct wmeParams *ac = &tmp[aci];
6281 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6282 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6283 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6284 		ivp->queue_params[aci].edca_txop =
6285 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6286 	}
6287 	ivp->have_wme = TRUE;
6288 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6289 		in = IWM_NODE(vap->iv_bss);
6290 		if (in->in_assoc) {
6291 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6292 				device_printf(sc->sc_dev,
6293 				    "%s: failed to update MAC\n", __func__);
6294 			}
6295 		}
6296 	}
6297 	IWM_UNLOCK(sc);
6298 
6299 	return (0);
6300 #undef IWM_EXP2
6301 }
6302 
6303 static void
6304 iwm_preinit(void *arg)
6305 {
6306 	struct iwm_softc *sc = arg;
6307 	device_t dev = sc->sc_dev;
6308 	struct ieee80211com *ic = &sc->sc_ic;
6309 	int error;
6310 
6311 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6312 	    "->%s\n", __func__);
6313 
6314 	IWM_LOCK(sc);
6315 	if ((error = iwm_start_hw(sc)) != 0) {
6316 		device_printf(dev, "could not initialize hardware\n");
6317 		IWM_UNLOCK(sc);
6318 		goto fail;
6319 	}
6320 
6321 	error = iwm_run_init_ucode(sc, 1);
6322 	iwm_stop_device(sc);
6323 	if (error) {
6324 		IWM_UNLOCK(sc);
6325 		goto fail;
6326 	}
6327 	device_printf(dev,
6328 	    "hw rev 0x%x, fw ver %s, address %s\n",
6329 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6330 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6331 
6332 	/* not all hardware can do 5GHz band */
6333 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6334 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6335 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6336 	IWM_UNLOCK(sc);
6337 
6338 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6339 	    ic->ic_channels);
6340 
6341 	/*
6342 	 * At this point we've committed - if we fail to do setup,
6343 	 * we now also have to tear down the net80211 state.
6344 	 */
6345 	ieee80211_ifattach(ic);
6346 	ic->ic_vap_create = iwm_vap_create;
6347 	ic->ic_vap_delete = iwm_vap_delete;
6348 	ic->ic_raw_xmit = iwm_raw_xmit;
6349 	ic->ic_node_alloc = iwm_node_alloc;
6350 	ic->ic_scan_start = iwm_scan_start;
6351 	ic->ic_scan_end = iwm_scan_end;
6352 	ic->ic_update_mcast = iwm_update_mcast;
6353 	ic->ic_getradiocaps = iwm_init_channel_map;
6354 	ic->ic_set_channel = iwm_set_channel;
6355 	ic->ic_scan_curchan = iwm_scan_curchan;
6356 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6357 	ic->ic_wme.wme_update = iwm_wme_update;
6358 	ic->ic_parent = iwm_parent;
6359 	ic->ic_transmit = iwm_transmit;
6360 	iwm_radiotap_attach(sc);
6361 	if (bootverbose)
6362 		ieee80211_announce(ic);
6363 
6364 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6365 	    "<-%s\n", __func__);
6366 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6367 
6368 	return;
6369 fail:
6370 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6371 	iwm_detach_local(sc, 0);
6372 }
6373 
6374 /*
6375  * Attach the interface to 802.11 radiotap.
6376  */
6377 static void
6378 iwm_radiotap_attach(struct iwm_softc *sc)
6379 {
6380 	struct ieee80211com *ic = &sc->sc_ic;
6381 
6382 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6383 	    "->%s begin\n", __func__);
6384 	ieee80211_radiotap_attach(ic,
6385 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6386 	    IWM_TX_RADIOTAP_PRESENT,
6387 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6388 	    IWM_RX_RADIOTAP_PRESENT);
6389 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6390 	    "<-%s end\n", __func__);
6391 }
6392 
6393 static struct ieee80211vap *
6394 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6395     enum ieee80211_opmode opmode, int flags,
6396     const uint8_t bssid[IEEE80211_ADDR_LEN],
6397     const uint8_t mac[IEEE80211_ADDR_LEN])
6398 {
6399 	struct iwm_vap *ivp;
6400 	struct ieee80211vap *vap;
6401 
6402 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6403 		return NULL;
6404 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6405 	vap = &ivp->iv_vap;
6406 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6407 	vap->iv_bmissthreshold = 10;            /* override default */
6408 	/* Override with driver methods. */
6409 	ivp->iv_newstate = vap->iv_newstate;
6410 	vap->iv_newstate = iwm_newstate;
6411 
6412 	ivp->id = IWM_DEFAULT_MACID;
6413 	ivp->color = IWM_DEFAULT_COLOR;
6414 
6415 	ivp->have_wme = FALSE;
6416 	ivp->ps_disabled = FALSE;
6417 
6418 	ieee80211_ratectl_init(vap);
6419 	/* Complete setup. */
6420 	ieee80211_vap_attach(vap, ieee80211_media_change,
6421 	    ieee80211_media_status, mac);
6422 	ic->ic_opmode = opmode;
6423 
6424 	return vap;
6425 }
6426 
6427 static void
6428 iwm_vap_delete(struct ieee80211vap *vap)
6429 {
6430 	struct iwm_vap *ivp = IWM_VAP(vap);
6431 
6432 	ieee80211_ratectl_deinit(vap);
6433 	ieee80211_vap_detach(vap);
6434 	free(ivp, M_80211_VAP);
6435 }
6436 
6437 static void
6438 iwm_xmit_queue_drain(struct iwm_softc *sc)
6439 {
6440 	struct mbuf *m;
6441 	struct ieee80211_node *ni;
6442 
6443 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6444 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6445 		ieee80211_free_node(ni);
6446 		m_freem(m);
6447 	}
6448 }
6449 
6450 static void
6451 iwm_scan_start(struct ieee80211com *ic)
6452 {
6453 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6454 	struct iwm_softc *sc = ic->ic_softc;
6455 	int error;
6456 
6457 	IWM_LOCK(sc);
6458 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6459 		/* This should not be possible */
6460 		device_printf(sc->sc_dev,
6461 		    "%s: Previous scan not completed yet\n", __func__);
6462 	}
6463 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6464 		error = iwm_umac_scan(sc);
6465 	else
6466 		error = iwm_lmac_scan(sc);
6467 	if (error != 0) {
6468 		device_printf(sc->sc_dev, "could not initiate scan\n");
6469 		IWM_UNLOCK(sc);
6470 		ieee80211_cancel_scan(vap);
6471 	} else {
6472 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6473 		iwm_led_blink_start(sc);
6474 		IWM_UNLOCK(sc);
6475 	}
6476 }
6477 
6478 static void
6479 iwm_scan_end(struct ieee80211com *ic)
6480 {
6481 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6482 	struct iwm_softc *sc = ic->ic_softc;
6483 
6484 	IWM_LOCK(sc);
6485 	iwm_led_blink_stop(sc);
6486 	if (vap->iv_state == IEEE80211_S_RUN)
6487 		iwm_led_enable(sc);
6488 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6489 		/*
6490 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6491 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6492 		 * taskqueue.
6493 		 */
6494 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6495 		iwm_scan_stop_wait(sc);
6496 	}
6497 	IWM_UNLOCK(sc);
6498 
6499 	/*
6500 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6501 	 * This is to make sure that it won't call ieee80211_scan_done
6502 	 * when we have already started the next scan.
6503 	 */
6504 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6505 }
6506 
6507 static void
6508 iwm_update_mcast(struct ieee80211com *ic)
6509 {
6510 }
6511 
6512 static void
6513 iwm_set_channel(struct ieee80211com *ic)
6514 {
6515 }
6516 
6517 static void
6518 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6519 {
6520 }
6521 
6522 static void
6523 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6524 {
6525 }
6526 
6527 void
6528 iwm_init_task(void *arg1)
6529 {
6530 	struct iwm_softc *sc = arg1;
6531 
6532 	IWM_LOCK(sc);
6533 	while (sc->sc_flags & IWM_FLAG_BUSY)
6534 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6535 	sc->sc_flags |= IWM_FLAG_BUSY;
6536 	iwm_stop(sc);
6537 	if (sc->sc_ic.ic_nrunning > 0)
6538 		iwm_init(sc);
6539 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6540 	wakeup(&sc->sc_flags);
6541 	IWM_UNLOCK(sc);
6542 }
6543 
6544 static int
6545 iwm_resume(device_t dev)
6546 {
6547 	struct iwm_softc *sc = device_get_softc(dev);
6548 	int do_reinit = 0;
6549 
6550 	/*
6551 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6552 	 * PCI Tx retries from interfering with C3 CPU state.
6553 	 */
6554 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6555 
6556 	if (!sc->sc_attached)
6557 		return 0;
6558 
6559 	iwm_init_task(device_get_softc(dev));
6560 
6561 	IWM_LOCK(sc);
6562 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6563 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6564 		do_reinit = 1;
6565 	}
6566 	IWM_UNLOCK(sc);
6567 
6568 	if (do_reinit)
6569 		ieee80211_resume_all(&sc->sc_ic);
6570 
6571 	return 0;
6572 }
6573 
6574 static int
6575 iwm_suspend(device_t dev)
6576 {
6577 	int do_stop = 0;
6578 	struct iwm_softc *sc = device_get_softc(dev);
6579 
6580 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6581 
6582 	if (!sc->sc_attached)
6583 		return (0);
6584 
6585 	ieee80211_suspend_all(&sc->sc_ic);
6586 
6587 	if (do_stop) {
6588 		IWM_LOCK(sc);
6589 		iwm_stop(sc);
6590 		sc->sc_flags |= IWM_FLAG_SCANNING;
6591 		IWM_UNLOCK(sc);
6592 	}
6593 
6594 	return (0);
6595 }
6596 
6597 static int
6598 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6599 {
6600 	struct iwm_fw_info *fw = &sc->sc_fw;
6601 	device_t dev = sc->sc_dev;
6602 	int i;
6603 
6604 	if (!sc->sc_attached)
6605 		return 0;
6606 	sc->sc_attached = 0;
6607 	if (do_net80211) {
6608 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6609 	}
6610 	iwm_stop_device(sc);
6611 	taskqueue_drain_all(sc->sc_tq);
6612 	taskqueue_free(sc->sc_tq);
6613 	if (do_net80211) {
6614 		IWM_LOCK(sc);
6615 		iwm_xmit_queue_drain(sc);
6616 		IWM_UNLOCK(sc);
6617 		ieee80211_ifdetach(&sc->sc_ic);
6618 	}
6619 	callout_drain(&sc->sc_led_blink_to);
6620 	callout_drain(&sc->sc_watchdog_to);
6621 
6622 	iwm_phy_db_free(sc->sc_phy_db);
6623 	sc->sc_phy_db = NULL;
6624 
6625 	iwm_free_nvm_data(sc->nvm_data);
6626 
6627 	/* Free descriptor rings */
6628 	iwm_free_rx_ring(sc, &sc->rxq);
6629 	for (i = 0; i < nitems(sc->txq); i++)
6630 		iwm_free_tx_ring(sc, &sc->txq[i]);
6631 
6632 	/* Free firmware */
6633 	if (fw->fw_fp != NULL)
6634 		iwm_fw_info_free(fw);
6635 
6636 	/* Free scheduler */
6637 	iwm_dma_contig_free(&sc->sched_dma);
6638 	iwm_dma_contig_free(&sc->ict_dma);
6639 	iwm_dma_contig_free(&sc->kw_dma);
6640 	iwm_dma_contig_free(&sc->fw_dma);
6641 
6642 	iwm_free_fw_paging(sc);
6643 
6644 	/* Finished with the hardware - detach things */
6645 	iwm_pci_detach(dev);
6646 
6647 	if (sc->sc_notif_wait != NULL) {
6648 		iwm_notification_wait_free(sc->sc_notif_wait);
6649 		sc->sc_notif_wait = NULL;
6650 	}
6651 
6652 	IWM_LOCK_DESTROY(sc);
6653 
6654 	return (0);
6655 }
6656 
6657 static int
6658 iwm_detach(device_t dev)
6659 {
6660 	struct iwm_softc *sc = device_get_softc(dev);
6661 
6662 	return (iwm_detach_local(sc, 1));
6663 }
6664 
6665 static device_method_t iwm_pci_methods[] = {
6666         /* Device interface */
6667         DEVMETHOD(device_probe,         iwm_probe),
6668         DEVMETHOD(device_attach,        iwm_attach),
6669         DEVMETHOD(device_detach,        iwm_detach),
6670         DEVMETHOD(device_suspend,       iwm_suspend),
6671         DEVMETHOD(device_resume,        iwm_resume),
6672 
6673         DEVMETHOD_END
6674 };
6675 
6676 static driver_t iwm_pci_driver = {
6677         "iwm",
6678         iwm_pci_methods,
6679         sizeof (struct iwm_softc)
6680 };
6681 
6682 static devclass_t iwm_devclass;
6683 
6684 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6685 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6686     iwm_devices, nitems(iwm_devices));
6687 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6688 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6689 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6690