xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 6be3386466ab79a84b48429ae66244f21526d3df)
1 /*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110 
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127 
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131 
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134 
135 #include <net/bpf.h>
136 
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143 
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148 
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153 
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sf.h>
168 #include <dev/iwm/if_iwm_sta.h>
169 
170 #include <dev/iwm/if_iwm_pcie_trans.h>
171 #include <dev/iwm/if_iwm_led.h>
172 #include <dev/iwm/if_iwm_fw.h>
173 
174 /* From DragonflyBSD */
175 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176 
177 const uint8_t iwm_nvm_channels[] = {
178 	/* 2.4 GHz */
179 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
180 	/* 5 GHz */
181 	36, 40, 44, 48, 52, 56, 60, 64,
182 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
183 	149, 153, 157, 161, 165
184 };
185 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
186     "IWM_NUM_CHANNELS is too small");
187 
188 const uint8_t iwm_nvm_channels_8000[] = {
189 	/* 2.4 GHz */
190 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
191 	/* 5 GHz */
192 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
193 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
194 	149, 153, 157, 161, 165, 169, 173, 177, 181
195 };
196 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
197     "IWM_NUM_CHANNELS_8000 is too small");
198 
199 #define IWM_NUM_2GHZ_CHANNELS	14
200 #define IWM_N_HW_ADDR_MASK	0xF
201 
202 /*
203  * XXX For now, there's simply a fixed set of rate table entries
204  * that are populated.
205  */
206 const struct iwm_rate {
207 	uint8_t rate;
208 	uint8_t plcp;
209 } iwm_rates[] = {
210 	{   2,	IWM_RATE_1M_PLCP  },
211 	{   4,	IWM_RATE_2M_PLCP  },
212 	{  11,	IWM_RATE_5M_PLCP  },
213 	{  22,	IWM_RATE_11M_PLCP },
214 	{  12,	IWM_RATE_6M_PLCP  },
215 	{  18,	IWM_RATE_9M_PLCP  },
216 	{  24,	IWM_RATE_12M_PLCP },
217 	{  36,	IWM_RATE_18M_PLCP },
218 	{  48,	IWM_RATE_24M_PLCP },
219 	{  72,	IWM_RATE_36M_PLCP },
220 	{  96,	IWM_RATE_48M_PLCP },
221 	{ 108,	IWM_RATE_54M_PLCP },
222 };
223 #define IWM_RIDX_CCK	0
224 #define IWM_RIDX_OFDM	4
225 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
226 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
227 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228 
229 struct iwm_nvm_section {
230 	uint16_t length;
231 	uint8_t *data;
232 };
233 
234 #define IWM_UCODE_ALIVE_TIMEOUT	hz
235 #define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
236 
237 struct iwm_alive_data {
238 	int valid;
239 	uint32_t scd_base_addr;
240 };
241 
242 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243 static int	iwm_firmware_store_section(struct iwm_softc *,
244                                            enum iwm_ucode_type,
245                                            const uint8_t *, size_t);
246 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
247 static void	iwm_fw_info_free(struct iwm_fw_info *);
248 static int	iwm_read_firmware(struct iwm_softc *);
249 static int	iwm_alloc_fwmem(struct iwm_softc *);
250 static int	iwm_alloc_sched(struct iwm_softc *);
251 static int	iwm_alloc_kw(struct iwm_softc *);
252 static int	iwm_alloc_ict(struct iwm_softc *);
253 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void	iwm_enable_interrupts(struct iwm_softc *);
261 static void	iwm_restore_interrupts(struct iwm_softc *);
262 static void	iwm_disable_interrupts(struct iwm_softc *);
263 static void	iwm_ict_reset(struct iwm_softc *);
264 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void	iwm_stop_device(struct iwm_softc *);
266 static void	iwm_nic_config(struct iwm_softc *);
267 static int	iwm_nic_rx_init(struct iwm_softc *);
268 static int	iwm_nic_tx_init(struct iwm_softc *);
269 static int	iwm_nic_init(struct iwm_softc *);
270 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274 				     uint16_t *, uint32_t);
275 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
276 static void	iwm_add_channel_band(struct iwm_softc *,
277 		    struct ieee80211_channel[], int, int *, int, size_t,
278 		    const uint8_t[]);
279 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
280 		    struct ieee80211_channel[]);
281 static struct iwm_nvm_data *
282 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283 			   const uint16_t *, const uint16_t *,
284 			   const uint16_t *, const uint16_t *,
285 			   const uint16_t *);
286 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
287 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
288 					       struct iwm_nvm_data *,
289 					       const uint16_t *,
290 					       const uint16_t *);
291 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292 			    const uint16_t *);
293 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295 				  const uint16_t *);
296 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
297 				   const uint16_t *);
298 static void	iwm_set_radio_cfg(const struct iwm_softc *,
299 				  struct iwm_nvm_data *, uint32_t);
300 static struct iwm_nvm_data *
301 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302 static int	iwm_nvm_init(struct iwm_softc *);
303 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304 				      const struct iwm_fw_desc *);
305 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306 					     bus_addr_t, uint32_t);
307 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308 						const struct iwm_fw_img *,
309 						int, int *);
310 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
311 					   const struct iwm_fw_img *,
312 					   int, int *);
313 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314 					       const struct iwm_fw_img *);
315 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
316 					  const struct iwm_fw_img *);
317 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
318 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
320 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
321                                               enum iwm_ucode_type);
322 static int	iwm_run_init_ucode(struct iwm_softc *, int);
323 static int	iwm_config_ltr(struct iwm_softc *sc);
324 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
325 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
326                                       struct iwm_rx_packet *);
327 static int	iwm_get_noise(struct iwm_softc *,
328 		    const struct iwm_statistics_rx_non_phy *);
329 static void	iwm_handle_rx_statistics(struct iwm_softc *,
330 		    struct iwm_rx_packet *);
331 static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
332 		    uint32_t, bool);
333 static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
334                                          struct iwm_rx_packet *,
335 				         struct iwm_node *);
336 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
337 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
338 #if 0
339 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
340                                  uint16_t);
341 #endif
342 static const struct iwm_rate *
343 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
344 			struct mbuf *, struct iwm_tx_cmd *);
345 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
346                        struct ieee80211_node *, int);
347 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
348 			     const struct ieee80211_bpf_params *);
349 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
350 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
351 static struct ieee80211_node *
352 		iwm_node_alloc(struct ieee80211vap *,
353 		               const uint8_t[IEEE80211_ADDR_LEN]);
354 static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
355 static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
356 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
357 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
358 static void	iwm_endscan_cb(void *, int);
359 static int	iwm_send_bt_init_conf(struct iwm_softc *);
360 static boolean_t iwm_is_lar_supported(struct iwm_softc *);
361 static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
362 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
363 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
364 static int	iwm_init_hw(struct iwm_softc *);
365 static void	iwm_init(struct iwm_softc *);
366 static void	iwm_start(struct iwm_softc *);
367 static void	iwm_stop(struct iwm_softc *);
368 static void	iwm_watchdog(void *);
369 static void	iwm_parent(struct ieee80211com *);
370 #ifdef IWM_DEBUG
371 static const char *
372 		iwm_desc_lookup(uint32_t);
373 static void	iwm_nic_error(struct iwm_softc *);
374 static void	iwm_nic_umac_error(struct iwm_softc *);
375 #endif
376 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
377 static void	iwm_notif_intr(struct iwm_softc *);
378 static void	iwm_intr(void *);
379 static int	iwm_attach(device_t);
380 static int	iwm_is_valid_ether_addr(uint8_t *);
381 static void	iwm_preinit(void *);
382 static int	iwm_detach_local(struct iwm_softc *sc, int);
383 static void	iwm_init_task(void *);
384 static void	iwm_radiotap_attach(struct iwm_softc *);
385 static struct ieee80211vap *
386 		iwm_vap_create(struct ieee80211com *,
387 		               const char [IFNAMSIZ], int,
388 		               enum ieee80211_opmode, int,
389 		               const uint8_t [IEEE80211_ADDR_LEN],
390 		               const uint8_t [IEEE80211_ADDR_LEN]);
391 static void	iwm_vap_delete(struct ieee80211vap *);
392 static void	iwm_xmit_queue_drain(struct iwm_softc *);
393 static void	iwm_scan_start(struct ieee80211com *);
394 static void	iwm_scan_end(struct ieee80211com *);
395 static void	iwm_update_mcast(struct ieee80211com *);
396 static void	iwm_set_channel(struct ieee80211com *);
397 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
398 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
399 static int	iwm_detach(device_t);
400 
401 static int	iwm_lar_disable = 0;
402 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
403 
404 /*
405  * Firmware parser.
406  */
407 
408 static int
409 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
410 {
411 	const struct iwm_fw_cscheme_list *l = (const void *)data;
412 
413 	if (dlen < sizeof(*l) ||
414 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
415 		return EINVAL;
416 
417 	/* we don't actually store anything for now, always use s/w crypto */
418 
419 	return 0;
420 }
421 
422 static int
423 iwm_firmware_store_section(struct iwm_softc *sc,
424     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
425 {
426 	struct iwm_fw_img *fws;
427 	struct iwm_fw_desc *fwone;
428 
429 	if (type >= IWM_UCODE_TYPE_MAX)
430 		return EINVAL;
431 	if (dlen < sizeof(uint32_t))
432 		return EINVAL;
433 
434 	fws = &sc->sc_fw.img[type];
435 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
436 		return EINVAL;
437 
438 	fwone = &fws->sec[fws->fw_count];
439 
440 	/* first 32bit are device load offset */
441 	memcpy(&fwone->offset, data, sizeof(uint32_t));
442 
443 	/* rest is data */
444 	fwone->data = data + sizeof(uint32_t);
445 	fwone->len = dlen - sizeof(uint32_t);
446 
447 	fws->fw_count++;
448 
449 	return 0;
450 }
451 
452 #define IWM_DEFAULT_SCAN_CHANNELS 40
453 
454 /* iwlwifi: iwl-drv.c */
455 struct iwm_tlv_calib_data {
456 	uint32_t ucode_type;
457 	struct iwm_tlv_calib_ctrl calib;
458 } __packed;
459 
460 static int
461 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
462 {
463 	const struct iwm_tlv_calib_data *def_calib = data;
464 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
465 
466 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
467 		device_printf(sc->sc_dev,
468 		    "Wrong ucode_type %u for default "
469 		    "calibration.\n", ucode_type);
470 		return EINVAL;
471 	}
472 
473 	sc->sc_default_calib[ucode_type].flow_trigger =
474 	    def_calib->calib.flow_trigger;
475 	sc->sc_default_calib[ucode_type].event_trigger =
476 	    def_calib->calib.event_trigger;
477 
478 	return 0;
479 }
480 
481 static int
482 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
483 			struct iwm_ucode_capabilities *capa)
484 {
485 	const struct iwm_ucode_api *ucode_api = (const void *)data;
486 	uint32_t api_index = le32toh(ucode_api->api_index);
487 	uint32_t api_flags = le32toh(ucode_api->api_flags);
488 	int i;
489 
490 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
491 		device_printf(sc->sc_dev,
492 		    "api flags index %d larger than supported by driver\n",
493 		    api_index);
494 		/* don't return an error so we can load FW that has more bits */
495 		return 0;
496 	}
497 
498 	for (i = 0; i < 32; i++) {
499 		if (api_flags & (1U << i))
500 			setbit(capa->enabled_api, i + 32 * api_index);
501 	}
502 
503 	return 0;
504 }
505 
506 static int
507 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
508 			   struct iwm_ucode_capabilities *capa)
509 {
510 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
511 	uint32_t api_index = le32toh(ucode_capa->api_index);
512 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
513 	int i;
514 
515 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
516 		device_printf(sc->sc_dev,
517 		    "capa flags index %d larger than supported by driver\n",
518 		    api_index);
519 		/* don't return an error so we can load FW that has more bits */
520 		return 0;
521 	}
522 
523 	for (i = 0; i < 32; i++) {
524 		if (api_flags & (1U << i))
525 			setbit(capa->enabled_capa, i + 32 * api_index);
526 	}
527 
528 	return 0;
529 }
530 
531 static void
532 iwm_fw_info_free(struct iwm_fw_info *fw)
533 {
534 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
535 	fw->fw_fp = NULL;
536 	memset(fw->img, 0, sizeof(fw->img));
537 }
538 
539 static int
540 iwm_read_firmware(struct iwm_softc *sc)
541 {
542 	struct iwm_fw_info *fw = &sc->sc_fw;
543 	const struct iwm_tlv_ucode_header *uhdr;
544 	const struct iwm_ucode_tlv *tlv;
545 	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
546 	enum iwm_ucode_tlv_type tlv_type;
547 	const struct firmware *fwp;
548 	const uint8_t *data;
549 	uint32_t tlv_len;
550 	uint32_t usniffer_img;
551 	const uint8_t *tlv_data;
552 	uint32_t paging_mem_size;
553 	int num_of_cpus;
554 	int error = 0;
555 	size_t len;
556 
557 	/*
558 	 * Load firmware into driver memory.
559 	 * fw_fp will be set.
560 	 */
561 	fwp = firmware_get(sc->cfg->fw_name);
562 	if (fwp == NULL) {
563 		device_printf(sc->sc_dev,
564 		    "could not read firmware %s (error %d)\n",
565 		    sc->cfg->fw_name, error);
566 		goto out;
567 	}
568 	fw->fw_fp = fwp;
569 
570 	/* (Re-)Initialize default values. */
571 	capa->flags = 0;
572 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
573 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
574 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
575 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
576 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
577 
578 	/*
579 	 * Parse firmware contents
580 	 */
581 
582 	uhdr = (const void *)fw->fw_fp->data;
583 	if (*(const uint32_t *)fw->fw_fp->data != 0
584 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
585 		device_printf(sc->sc_dev, "invalid firmware %s\n",
586 		    sc->cfg->fw_name);
587 		error = EINVAL;
588 		goto out;
589 	}
590 
591 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
592 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
593 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
594 	    IWM_UCODE_API(le32toh(uhdr->ver)));
595 	data = uhdr->data;
596 	len = fw->fw_fp->datasize - sizeof(*uhdr);
597 
598 	while (len >= sizeof(*tlv)) {
599 		len -= sizeof(*tlv);
600 		tlv = (const void *)data;
601 
602 		tlv_len = le32toh(tlv->length);
603 		tlv_type = le32toh(tlv->type);
604 		tlv_data = tlv->data;
605 
606 		if (len < tlv_len) {
607 			device_printf(sc->sc_dev,
608 			    "firmware too short: %zu bytes\n",
609 			    len);
610 			error = EINVAL;
611 			goto parse_out;
612 		}
613 		len -= roundup2(tlv_len, 4);
614 		data += sizeof(*tlv) + roundup2(tlv_len, 4);
615 
616 		switch ((int)tlv_type) {
617 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
618 			if (tlv_len != sizeof(uint32_t)) {
619 				device_printf(sc->sc_dev,
620 				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
621 				    __func__, tlv_len);
622 				error = EINVAL;
623 				goto parse_out;
624 			}
625 			capa->max_probe_length =
626 			    le32_to_cpup((const uint32_t *)tlv_data);
627 			/* limit it to something sensible */
628 			if (capa->max_probe_length >
629 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
630 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
631 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
632 				    "ridiculous\n", __func__);
633 				error = EINVAL;
634 				goto parse_out;
635 			}
636 			break;
637 		case IWM_UCODE_TLV_PAN:
638 			if (tlv_len) {
639 				device_printf(sc->sc_dev,
640 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
641 				    __func__, tlv_len);
642 				error = EINVAL;
643 				goto parse_out;
644 			}
645 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
646 			break;
647 		case IWM_UCODE_TLV_FLAGS:
648 			if (tlv_len < sizeof(uint32_t)) {
649 				device_printf(sc->sc_dev,
650 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
651 				    __func__, tlv_len);
652 				error = EINVAL;
653 				goto parse_out;
654 			}
655 			if (tlv_len % sizeof(uint32_t)) {
656 				device_printf(sc->sc_dev,
657 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
658 				    __func__, tlv_len);
659 				error = EINVAL;
660 				goto parse_out;
661 			}
662 			/*
663 			 * Apparently there can be many flags, but Linux driver
664 			 * parses only the first one, and so do we.
665 			 *
666 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
667 			 * Intentional or a bug?  Observations from
668 			 * current firmware file:
669 			 *  1) TLV_PAN is parsed first
670 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
671 			 * ==> this resets TLV_PAN to itself... hnnnk
672 			 */
673 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
674 			break;
675 		case IWM_UCODE_TLV_CSCHEME:
676 			if ((error = iwm_store_cscheme(sc,
677 			    tlv_data, tlv_len)) != 0) {
678 				device_printf(sc->sc_dev,
679 				    "%s: iwm_store_cscheme(): returned %d\n",
680 				    __func__, error);
681 				goto parse_out;
682 			}
683 			break;
684 		case IWM_UCODE_TLV_NUM_OF_CPU:
685 			if (tlv_len != sizeof(uint32_t)) {
686 				device_printf(sc->sc_dev,
687 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
688 				    __func__, tlv_len);
689 				error = EINVAL;
690 				goto parse_out;
691 			}
692 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
693 			if (num_of_cpus == 2) {
694 				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
695 					TRUE;
696 				fw->img[IWM_UCODE_INIT].is_dual_cpus =
697 					TRUE;
698 				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
699 					TRUE;
700 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
701 				device_printf(sc->sc_dev,
702 				    "%s: Driver supports only 1 or 2 CPUs\n",
703 				    __func__);
704 				error = EINVAL;
705 				goto parse_out;
706 			}
707 			break;
708 		case IWM_UCODE_TLV_SEC_RT:
709 			if ((error = iwm_firmware_store_section(sc,
710 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
711 				device_printf(sc->sc_dev,
712 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
713 				    __func__, error);
714 				goto parse_out;
715 			}
716 			break;
717 		case IWM_UCODE_TLV_SEC_INIT:
718 			if ((error = iwm_firmware_store_section(sc,
719 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
720 				device_printf(sc->sc_dev,
721 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
722 				    __func__, error);
723 				goto parse_out;
724 			}
725 			break;
726 		case IWM_UCODE_TLV_SEC_WOWLAN:
727 			if ((error = iwm_firmware_store_section(sc,
728 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
729 				device_printf(sc->sc_dev,
730 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
731 				    __func__, error);
732 				goto parse_out;
733 			}
734 			break;
735 		case IWM_UCODE_TLV_DEF_CALIB:
736 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
737 				device_printf(sc->sc_dev,
738 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
739 				    __func__, tlv_len,
740 				    sizeof(struct iwm_tlv_calib_data));
741 				error = EINVAL;
742 				goto parse_out;
743 			}
744 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
745 				device_printf(sc->sc_dev,
746 				    "%s: iwm_set_default_calib() failed: %d\n",
747 				    __func__, error);
748 				goto parse_out;
749 			}
750 			break;
751 		case IWM_UCODE_TLV_PHY_SKU:
752 			if (tlv_len != sizeof(uint32_t)) {
753 				error = EINVAL;
754 				device_printf(sc->sc_dev,
755 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
756 				    __func__, tlv_len);
757 				goto parse_out;
758 			}
759 			sc->sc_fw.phy_config =
760 			    le32_to_cpup((const uint32_t *)tlv_data);
761 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
762 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
763 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
764 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
765 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
766 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
767 			break;
768 
769 		case IWM_UCODE_TLV_API_CHANGES_SET: {
770 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
771 				error = EINVAL;
772 				goto parse_out;
773 			}
774 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
775 				error = EINVAL;
776 				goto parse_out;
777 			}
778 			break;
779 		}
780 
781 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
782 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
783 				error = EINVAL;
784 				goto parse_out;
785 			}
786 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
787 				error = EINVAL;
788 				goto parse_out;
789 			}
790 			break;
791 		}
792 
793 		case IWM_UCODE_TLV_CMD_VERSIONS:
794 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
795 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
796 			/* ignore, not used by current driver */
797 			break;
798 
799 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
800 			if ((error = iwm_firmware_store_section(sc,
801 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
802 			    tlv_len)) != 0)
803 				goto parse_out;
804 			break;
805 
806 		case IWM_UCODE_TLV_PAGING:
807 			if (tlv_len != sizeof(uint32_t)) {
808 				error = EINVAL;
809 				goto parse_out;
810 			}
811 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
812 
813 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
814 			    "%s: Paging: paging enabled (size = %u bytes)\n",
815 			    __func__, paging_mem_size);
816 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
817 				device_printf(sc->sc_dev,
818 					"%s: Paging: driver supports up to %u bytes for paging image\n",
819 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
820 				error = EINVAL;
821 				goto out;
822 			}
823 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
824 				device_printf(sc->sc_dev,
825 				    "%s: Paging: image isn't multiple %u\n",
826 				    __func__, IWM_FW_PAGING_SIZE);
827 				error = EINVAL;
828 				goto out;
829 			}
830 
831 			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
832 			    paging_mem_size;
833 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
834 			sc->sc_fw.img[usniffer_img].paging_mem_size =
835 			    paging_mem_size;
836 			break;
837 
838 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
839 			if (tlv_len != sizeof(uint32_t)) {
840 				error = EINVAL;
841 				goto parse_out;
842 			}
843 			capa->n_scan_channels =
844 			    le32_to_cpup((const uint32_t *)tlv_data);
845 			break;
846 
847 		case IWM_UCODE_TLV_FW_VERSION:
848 			if (tlv_len != sizeof(uint32_t) * 3) {
849 				error = EINVAL;
850 				goto parse_out;
851 			}
852 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
853 			    "%u.%u.%u",
854 			    le32toh(((const uint32_t *)tlv_data)[0]),
855 			    le32toh(((const uint32_t *)tlv_data)[1]),
856 			    le32toh(((const uint32_t *)tlv_data)[2]));
857 			break;
858 
859 		case IWM_UCODE_TLV_FW_MEM_SEG:
860 			break;
861 
862 		default:
863 			device_printf(sc->sc_dev,
864 			    "%s: unknown firmware section %d, abort\n",
865 			    __func__, tlv_type);
866 			error = EINVAL;
867 			goto parse_out;
868 		}
869 	}
870 
871 	KASSERT(error == 0, ("unhandled error"));
872 
873  parse_out:
874 	if (error) {
875 		device_printf(sc->sc_dev, "firmware parse error %d, "
876 		    "section type %d\n", error, tlv_type);
877 	}
878 
879  out:
880 	if (error) {
881 		if (fw->fw_fp != NULL)
882 			iwm_fw_info_free(fw);
883 	}
884 
885 	return error;
886 }
887 
888 /*
889  * DMA resource routines
890  */
891 
892 /* fwmem is used to load firmware onto the card */
893 static int
894 iwm_alloc_fwmem(struct iwm_softc *sc)
895 {
896 	/* Must be aligned on a 16-byte boundary. */
897 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
898 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
899 }
900 
901 /* tx scheduler rings.  not used? */
902 static int
903 iwm_alloc_sched(struct iwm_softc *sc)
904 {
905 	/* TX scheduler rings must be aligned on a 1KB boundary. */
906 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
907 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
908 }
909 
910 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
911 static int
912 iwm_alloc_kw(struct iwm_softc *sc)
913 {
914 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
915 }
916 
917 /* interrupt cause table */
918 static int
919 iwm_alloc_ict(struct iwm_softc *sc)
920 {
921 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
922 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
923 }
924 
925 static int
926 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
927 {
928 	bus_size_t size;
929 	size_t descsz;
930 	int count, i, error;
931 
932 	ring->cur = 0;
933 	if (sc->cfg->mqrx_supported) {
934 		count = IWM_RX_MQ_RING_COUNT;
935 		descsz = sizeof(uint64_t);
936 	} else {
937 		count = IWM_RX_LEGACY_RING_COUNT;
938 		descsz = sizeof(uint32_t);
939 	}
940 
941 	/* Allocate RX descriptors (256-byte aligned). */
942 	size = count * descsz;
943 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
944 	    256);
945 	if (error != 0) {
946 		device_printf(sc->sc_dev,
947 		    "could not allocate RX ring DMA memory\n");
948 		goto fail;
949 	}
950 	ring->desc = ring->free_desc_dma.vaddr;
951 
952 	/* Allocate RX status area (16-byte aligned). */
953 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
954 	    sizeof(*ring->stat), 16);
955 	if (error != 0) {
956 		device_printf(sc->sc_dev,
957 		    "could not allocate RX status DMA memory\n");
958 		goto fail;
959 	}
960 	ring->stat = ring->stat_dma.vaddr;
961 
962 	if (sc->cfg->mqrx_supported) {
963 		size = count * sizeof(uint32_t);
964 		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
965 		    size, 256);
966 		if (error != 0) {
967 			device_printf(sc->sc_dev,
968 			    "could not allocate RX ring DMA memory\n");
969 			goto fail;
970 		}
971 	}
972 
973         /* Create RX buffer DMA tag. */
974         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
975             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
976             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
977         if (error != 0) {
978                 device_printf(sc->sc_dev,
979                     "%s: could not create RX buf DMA tag, error %d\n",
980                     __func__, error);
981                 goto fail;
982         }
983 
984 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
985 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
986 	if (error != 0) {
987 		device_printf(sc->sc_dev,
988 		    "%s: could not create RX buf DMA map, error %d\n",
989 		    __func__, error);
990 		goto fail;
991 	}
992 
993 	/*
994 	 * Allocate and map RX buffers.
995 	 */
996 	for (i = 0; i < count; i++) {
997 		struct iwm_rx_data *data = &ring->data[i];
998 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
999 		if (error != 0) {
1000 			device_printf(sc->sc_dev,
1001 			    "%s: could not create RX buf DMA map, error %d\n",
1002 			    __func__, error);
1003 			goto fail;
1004 		}
1005 		data->m = NULL;
1006 
1007 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1008 			goto fail;
1009 		}
1010 	}
1011 	return 0;
1012 
1013 fail:	iwm_free_rx_ring(sc, ring);
1014 	return error;
1015 }
1016 
1017 static void
1018 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1019 {
1020 	/* Reset the ring state */
1021 	ring->cur = 0;
1022 
1023 	/*
1024 	 * The hw rx ring index in shared memory must also be cleared,
1025 	 * otherwise the discrepancy can cause reprocessing chaos.
1026 	 */
1027 	if (sc->rxq.stat)
1028 		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1029 }
1030 
1031 static void
1032 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1033 {
1034 	int count, i;
1035 
1036 	iwm_dma_contig_free(&ring->free_desc_dma);
1037 	iwm_dma_contig_free(&ring->stat_dma);
1038 	iwm_dma_contig_free(&ring->used_desc_dma);
1039 
1040 	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
1041 	    IWM_RX_LEGACY_RING_COUNT;
1042 
1043 	for (i = 0; i < count; i++) {
1044 		struct iwm_rx_data *data = &ring->data[i];
1045 
1046 		if (data->m != NULL) {
1047 			bus_dmamap_sync(ring->data_dmat, data->map,
1048 			    BUS_DMASYNC_POSTREAD);
1049 			bus_dmamap_unload(ring->data_dmat, data->map);
1050 			m_freem(data->m);
1051 			data->m = NULL;
1052 		}
1053 		if (data->map != NULL) {
1054 			bus_dmamap_destroy(ring->data_dmat, data->map);
1055 			data->map = NULL;
1056 		}
1057 	}
1058 	if (ring->spare_map != NULL) {
1059 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1060 		ring->spare_map = NULL;
1061 	}
1062 	if (ring->data_dmat != NULL) {
1063 		bus_dma_tag_destroy(ring->data_dmat);
1064 		ring->data_dmat = NULL;
1065 	}
1066 }
1067 
1068 static int
1069 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1070 {
1071 	bus_addr_t paddr;
1072 	bus_size_t size;
1073 	size_t maxsize;
1074 	int nsegments;
1075 	int i, error;
1076 
1077 	ring->qid = qid;
1078 	ring->queued = 0;
1079 	ring->cur = 0;
1080 
1081 	/* Allocate TX descriptors (256-byte aligned). */
1082 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1083 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1084 	if (error != 0) {
1085 		device_printf(sc->sc_dev,
1086 		    "could not allocate TX ring DMA memory\n");
1087 		goto fail;
1088 	}
1089 	ring->desc = ring->desc_dma.vaddr;
1090 
1091 	/*
1092 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1093 	 * to allocate commands space for other rings.
1094 	 */
1095 	if (qid > IWM_CMD_QUEUE)
1096 		return 0;
1097 
1098 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1099 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1100 	if (error != 0) {
1101 		device_printf(sc->sc_dev,
1102 		    "could not allocate TX cmd DMA memory\n");
1103 		goto fail;
1104 	}
1105 	ring->cmd = ring->cmd_dma.vaddr;
1106 
1107 	/* FW commands may require more mapped space than packets. */
1108 	if (qid == IWM_CMD_QUEUE) {
1109 		maxsize = IWM_RBUF_SIZE;
1110 		nsegments = 1;
1111 	} else {
1112 		maxsize = MCLBYTES;
1113 		nsegments = IWM_MAX_SCATTER - 2;
1114 	}
1115 
1116 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1117 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1118             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1119 	if (error != 0) {
1120 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1121 		goto fail;
1122 	}
1123 
1124 	paddr = ring->cmd_dma.paddr;
1125 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1126 		struct iwm_tx_data *data = &ring->data[i];
1127 
1128 		data->cmd_paddr = paddr;
1129 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1130 		    + offsetof(struct iwm_tx_cmd, scratch);
1131 		paddr += sizeof(struct iwm_device_cmd);
1132 
1133 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1134 		if (error != 0) {
1135 			device_printf(sc->sc_dev,
1136 			    "could not create TX buf DMA map\n");
1137 			goto fail;
1138 		}
1139 	}
1140 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1141 	    ("invalid physical address"));
1142 	return 0;
1143 
1144 fail:	iwm_free_tx_ring(sc, ring);
1145 	return error;
1146 }
1147 
1148 static void
1149 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1150 {
1151 	int i;
1152 
1153 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1154 		struct iwm_tx_data *data = &ring->data[i];
1155 
1156 		if (data->m != NULL) {
1157 			bus_dmamap_sync(ring->data_dmat, data->map,
1158 			    BUS_DMASYNC_POSTWRITE);
1159 			bus_dmamap_unload(ring->data_dmat, data->map);
1160 			m_freem(data->m);
1161 			data->m = NULL;
1162 		}
1163 	}
1164 	/* Clear TX descriptors. */
1165 	memset(ring->desc, 0, ring->desc_dma.size);
1166 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1167 	    BUS_DMASYNC_PREWRITE);
1168 	sc->qfullmsk &= ~(1 << ring->qid);
1169 	ring->queued = 0;
1170 	ring->cur = 0;
1171 
1172 	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1173 		iwm_pcie_clear_cmd_in_flight(sc);
1174 }
1175 
1176 static void
1177 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1178 {
1179 	int i;
1180 
1181 	iwm_dma_contig_free(&ring->desc_dma);
1182 	iwm_dma_contig_free(&ring->cmd_dma);
1183 
1184 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1185 		struct iwm_tx_data *data = &ring->data[i];
1186 
1187 		if (data->m != NULL) {
1188 			bus_dmamap_sync(ring->data_dmat, data->map,
1189 			    BUS_DMASYNC_POSTWRITE);
1190 			bus_dmamap_unload(ring->data_dmat, data->map);
1191 			m_freem(data->m);
1192 			data->m = NULL;
1193 		}
1194 		if (data->map != NULL) {
1195 			bus_dmamap_destroy(ring->data_dmat, data->map);
1196 			data->map = NULL;
1197 		}
1198 	}
1199 	if (ring->data_dmat != NULL) {
1200 		bus_dma_tag_destroy(ring->data_dmat);
1201 		ring->data_dmat = NULL;
1202 	}
1203 }
1204 
1205 /*
1206  * High-level hardware frobbing routines
1207  */
1208 
1209 static void
1210 iwm_enable_interrupts(struct iwm_softc *sc)
1211 {
1212 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1213 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1214 }
1215 
1216 static void
1217 iwm_restore_interrupts(struct iwm_softc *sc)
1218 {
1219 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1220 }
1221 
1222 static void
1223 iwm_disable_interrupts(struct iwm_softc *sc)
1224 {
1225 	/* disable interrupts */
1226 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1227 
1228 	/* acknowledge all interrupts */
1229 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1230 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1231 }
1232 
1233 static void
1234 iwm_ict_reset(struct iwm_softc *sc)
1235 {
1236 	iwm_disable_interrupts(sc);
1237 
1238 	/* Reset ICT table. */
1239 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1240 	sc->ict_cur = 0;
1241 
1242 	/* Set physical address of ICT table (4KB aligned). */
1243 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1244 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1245 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1246 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1247 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1248 
1249 	/* Switch to ICT interrupt mode in driver. */
1250 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1251 
1252 	/* Re-enable interrupts. */
1253 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1254 	iwm_enable_interrupts(sc);
1255 }
1256 
1257 /* iwlwifi pcie/trans.c */
1258 
1259 /*
1260  * Since this .. hard-resets things, it's time to actually
1261  * mark the first vap (if any) as having no mac context.
1262  * It's annoying, but since the driver is potentially being
1263  * stop/start'ed whilst active (thanks openbsd port!) we
1264  * have to correctly track this.
1265  */
1266 static void
1267 iwm_stop_device(struct iwm_softc *sc)
1268 {
1269 	struct ieee80211com *ic = &sc->sc_ic;
1270 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1271 	int chnl, qid;
1272 	uint32_t mask = 0;
1273 
1274 	/* tell the device to stop sending interrupts */
1275 	iwm_disable_interrupts(sc);
1276 
1277 	/*
1278 	 * FreeBSD-local: mark the first vap as not-uploaded,
1279 	 * so the next transition through auth/assoc
1280 	 * will correctly populate the MAC context.
1281 	 */
1282 	if (vap) {
1283 		struct iwm_vap *iv = IWM_VAP(vap);
1284 		iv->phy_ctxt = NULL;
1285 		iv->is_uploaded = 0;
1286 	}
1287 	sc->sc_firmware_state = 0;
1288 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1289 
1290 	/* device going down, Stop using ICT table */
1291 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1292 
1293 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1294 
1295 	if (iwm_nic_lock(sc)) {
1296 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1297 
1298 		/* Stop each Tx DMA channel */
1299 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1300 			IWM_WRITE(sc,
1301 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1302 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1303 		}
1304 
1305 		/* Wait for DMA channels to be idle */
1306 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1307 		    5000)) {
1308 			device_printf(sc->sc_dev,
1309 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1310 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1311 		}
1312 		iwm_nic_unlock(sc);
1313 	}
1314 	iwm_pcie_rx_stop(sc);
1315 
1316 	/* Stop RX ring. */
1317 	iwm_reset_rx_ring(sc, &sc->rxq);
1318 
1319 	/* Reset all TX rings. */
1320 	for (qid = 0; qid < nitems(sc->txq); qid++)
1321 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1322 
1323 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1324 		/* Power-down device's busmaster DMA clocks */
1325 		if (iwm_nic_lock(sc)) {
1326 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1327 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1328 			iwm_nic_unlock(sc);
1329 		}
1330 		DELAY(5);
1331 	}
1332 
1333 	/* Make sure (redundant) we've released our request to stay awake */
1334 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1335 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1336 
1337 	/* Stop the device, and put it in low power state */
1338 	iwm_apm_stop(sc);
1339 
1340 	/* stop and reset the on-board processor */
1341 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1342 	DELAY(5000);
1343 
1344 	/*
1345 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1346 	 */
1347 	iwm_disable_interrupts(sc);
1348 
1349 	/*
1350 	 * Even if we stop the HW, we still want the RF kill
1351 	 * interrupt
1352 	 */
1353 	iwm_enable_rfkill_int(sc);
1354 	iwm_check_rfkill(sc);
1355 
1356 	iwm_prepare_card_hw(sc);
1357 }
1358 
1359 /* iwlwifi: mvm/ops.c */
1360 static void
1361 iwm_nic_config(struct iwm_softc *sc)
1362 {
1363 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1364 	uint32_t reg_val = 0;
1365 	uint32_t phy_config = iwm_get_phy_config(sc);
1366 
1367 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1368 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1369 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1370 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1371 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1372 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1373 
1374 	/* SKU control */
1375 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1376 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1377 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1378 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1379 
1380 	/* radio configuration */
1381 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1382 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1383 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1384 
1385 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
1386 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
1387 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
1388 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
1389 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
1390 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
1391 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1392 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
1393 	    reg_val);
1394 
1395 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1396 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1397 	    radio_cfg_step, radio_cfg_dash);
1398 
1399 	/*
1400 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1401 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1402 	 * to lose ownership and not being able to obtain it back.
1403 	 */
1404 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1405 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1406 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1407 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1408 	}
1409 }
1410 
1411 static int
1412 iwm_nic_rx_mq_init(struct iwm_softc *sc)
1413 {
1414 	int enabled;
1415 
1416 	if (!iwm_nic_lock(sc))
1417 		return EBUSY;
1418 
1419 	/* Stop RX DMA. */
1420 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1421 	/* Disable RX used and free queue operation. */
1422 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
1423 
1424 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
1425 	    sc->rxq.free_desc_dma.paddr);
1426 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
1427 	    sc->rxq.used_desc_dma.paddr);
1428 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
1429 	    sc->rxq.stat_dma.paddr);
1430 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
1431 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
1432 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
1433 
1434 	/* We configure only queue 0 for now. */
1435 	enabled = ((1 << 0) << 16) | (1 << 0);
1436 
1437 	/* Enable RX DMA, 4KB buffer size. */
1438 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
1439 	    IWM_RFH_DMA_EN_ENABLE_VAL |
1440 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
1441 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
1442 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
1443 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
1444 
1445 	/* Enable RX DMA snooping. */
1446 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
1447 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
1448 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1449 	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1450 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1451 
1452 	/* Enable the configured queue(s). */
1453 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
1454 
1455 	iwm_nic_unlock(sc);
1456 
1457 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1458 
1459 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
1460 
1461 	return (0);
1462 }
1463 
1464 static int
1465 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1466 {
1467 
1468 	/* Stop Rx DMA */
1469 	iwm_pcie_rx_stop(sc);
1470 
1471 	if (!iwm_nic_lock(sc))
1472 		return EBUSY;
1473 
1474 	/* reset and flush pointers */
1475 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1476 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1477 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1478 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1479 
1480 	/* Set physical address of RX ring (256-byte aligned). */
1481 	IWM_WRITE(sc,
1482 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1483 	    sc->rxq.free_desc_dma.paddr >> 8);
1484 
1485 	/* Set physical address of RX status (16-byte aligned). */
1486 	IWM_WRITE(sc,
1487 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1488 
1489 	/* Enable Rx DMA
1490 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1491 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1492 	 *      the credit mechanism in 5000 HW RX FIFO
1493 	 * Direct rx interrupts to hosts
1494 	 * Rx buffer size 4 or 8k or 12k
1495 	 * RB timeout 0x10
1496 	 * 256 RBDs
1497 	 */
1498 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1499 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1500 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1501 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1502 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1503 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1504 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1505 
1506 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1507 
1508 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1509 	if (sc->cfg->host_interrupt_operation_mode)
1510 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1511 
1512 	iwm_nic_unlock(sc);
1513 
1514 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1515 
1516 	return 0;
1517 }
1518 
1519 static int
1520 iwm_nic_rx_init(struct iwm_softc *sc)
1521 {
1522 	if (sc->cfg->mqrx_supported)
1523 		return iwm_nic_rx_mq_init(sc);
1524 	else
1525 		return iwm_nic_rx_legacy_init(sc);
1526 }
1527 
1528 static int
1529 iwm_nic_tx_init(struct iwm_softc *sc)
1530 {
1531 	int qid;
1532 
1533 	if (!iwm_nic_lock(sc))
1534 		return EBUSY;
1535 
1536 	/* Deactivate TX scheduler. */
1537 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1538 
1539 	/* Set physical address of "keep warm" page (16-byte aligned). */
1540 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1541 
1542 	/* Initialize TX rings. */
1543 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1544 		struct iwm_tx_ring *txq = &sc->txq[qid];
1545 
1546 		/* Set physical address of TX ring (256-byte aligned). */
1547 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1548 		    txq->desc_dma.paddr >> 8);
1549 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1550 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1551 		    __func__,
1552 		    qid, txq->desc,
1553 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1554 	}
1555 
1556 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
1557 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
1558 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
1559 
1560 	iwm_nic_unlock(sc);
1561 
1562 	return 0;
1563 }
1564 
1565 static int
1566 iwm_nic_init(struct iwm_softc *sc)
1567 {
1568 	int error;
1569 
1570 	iwm_apm_init(sc);
1571 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1572 		iwm_set_pwr(sc);
1573 
1574 	iwm_nic_config(sc);
1575 
1576 	if ((error = iwm_nic_rx_init(sc)) != 0)
1577 		return error;
1578 
1579 	/*
1580 	 * Ditto for TX, from iwn
1581 	 */
1582 	if ((error = iwm_nic_tx_init(sc)) != 0)
1583 		return error;
1584 
1585 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1586 	    "%s: shadow registers enabled\n", __func__);
1587 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1588 
1589 	return 0;
1590 }
1591 
1592 int
1593 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1594 {
1595 	int qmsk;
1596 
1597 	qmsk = 1 << qid;
1598 
1599 	if (!iwm_nic_lock(sc)) {
1600 		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1601 		    __func__, qid);
1602 		return EBUSY;
1603 	}
1604 
1605 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1606 
1607 	if (qid == IWM_CMD_QUEUE) {
1608 		/* Disable the scheduler. */
1609 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);
1610 
1611 		/* Stop the TX queue prior to configuration. */
1612 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1613 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1614 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1615 
1616 		iwm_nic_unlock(sc);
1617 
1618 		/* Disable aggregations for this queue. */
1619 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);
1620 
1621 		if (!iwm_nic_lock(sc)) {
1622 			device_printf(sc->sc_dev,
1623 			    "%s: cannot enable txq %d\n", __func__, qid);
1624 			return EBUSY;
1625 		}
1626 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1627 		iwm_nic_unlock(sc);
1628 
1629 		iwm_write_mem32(sc,
1630 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1631 		/* Set scheduler window size and frame limit. */
1632 		iwm_write_mem32(sc,
1633 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1634 		    sizeof(uint32_t),
1635 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1636 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1637 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1638 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1639 
1640 		if (!iwm_nic_lock(sc)) {
1641 			device_printf(sc->sc_dev,
1642 			    "%s: cannot enable txq %d\n", __func__, qid);
1643 			return EBUSY;
1644 		}
1645 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1646 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1647 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1648 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1649 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1650 
1651 		/* Enable the scheduler for this queue. */
1652 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
1653 	} else {
1654 		struct iwm_scd_txq_cfg_cmd cmd;
1655 		int error;
1656 
1657 		iwm_nic_unlock(sc);
1658 
1659 		memset(&cmd, 0, sizeof(cmd));
1660 		cmd.scd_queue = qid;
1661 		cmd.enable = 1;
1662 		cmd.sta_id = sta_id;
1663 		cmd.tx_fifo = fifo;
1664 		cmd.aggregate = 0;
1665 		cmd.window = IWM_FRAME_LIMIT;
1666 
1667 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1668 		    sizeof(cmd), &cmd);
1669 		if (error) {
1670 			device_printf(sc->sc_dev,
1671 			    "cannot enable txq %d\n", qid);
1672 			return error;
1673 		}
1674 
1675 		if (!iwm_nic_lock(sc))
1676 			return EBUSY;
1677 	}
1678 
1679 	iwm_nic_unlock(sc);
1680 
1681 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1682 	    __func__, qid, fifo);
1683 
1684 	return 0;
1685 }
1686 
1687 static int
1688 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1689 {
1690 	int error, chnl;
1691 
1692 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1693 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1694 
1695 	if (!iwm_nic_lock(sc))
1696 		return EBUSY;
1697 
1698 	iwm_ict_reset(sc);
1699 
1700 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1701 	if (scd_base_addr != 0 &&
1702 	    scd_base_addr != sc->scd_base_addr) {
1703 		device_printf(sc->sc_dev,
1704 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1705 		    __func__, sc->scd_base_addr, scd_base_addr);
1706 	}
1707 
1708 	iwm_nic_unlock(sc);
1709 
1710 	/* reset context data, TX status and translation data */
1711 	error = iwm_write_mem(sc,
1712 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1713 	    NULL, clear_dwords);
1714 	if (error)
1715 		return EBUSY;
1716 
1717 	if (!iwm_nic_lock(sc))
1718 		return EBUSY;
1719 
1720 	/* Set physical address of TX scheduler rings (1KB aligned). */
1721 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1722 
1723 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1724 
1725 	iwm_nic_unlock(sc);
1726 
1727 	/* enable command channel */
1728 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1729 	if (error)
1730 		return error;
1731 
1732 	if (!iwm_nic_lock(sc))
1733 		return EBUSY;
1734 
1735 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1736 
1737 	/* Enable DMA channels. */
1738 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1739 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1740 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1741 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1742 	}
1743 
1744 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1745 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1746 
1747 	iwm_nic_unlock(sc);
1748 
1749 	/* Enable L1-Active */
1750 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1751 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1752 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1753 	}
1754 
1755 	return error;
1756 }
1757 
1758 /*
1759  * NVM read access and content parsing.  We do not support
1760  * external NVM or writing NVM.
1761  * iwlwifi/mvm/nvm.c
1762  */
1763 
1764 /* Default NVM size to read */
1765 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1766 
1767 #define IWM_NVM_WRITE_OPCODE 1
1768 #define IWM_NVM_READ_OPCODE 0
1769 
1770 /* load nvm chunk response */
1771 enum {
1772 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1773 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1774 };
1775 
1776 static int
1777 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1778 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1779 {
1780 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1781 		.offset = htole16(offset),
1782 		.length = htole16(length),
1783 		.type = htole16(section),
1784 		.op_code = IWM_NVM_READ_OPCODE,
1785 	};
1786 	struct iwm_nvm_access_resp *nvm_resp;
1787 	struct iwm_rx_packet *pkt;
1788 	struct iwm_host_cmd cmd = {
1789 		.id = IWM_NVM_ACCESS_CMD,
1790 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1791 		.data = { &nvm_access_cmd, },
1792 	};
1793 	int ret, bytes_read, offset_read;
1794 	uint8_t *resp_data;
1795 
1796 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1797 
1798 	ret = iwm_send_cmd(sc, &cmd);
1799 	if (ret) {
1800 		device_printf(sc->sc_dev,
1801 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1802 		return ret;
1803 	}
1804 
1805 	pkt = cmd.resp_pkt;
1806 
1807 	/* Extract NVM response */
1808 	nvm_resp = (void *)pkt->data;
1809 	ret = le16toh(nvm_resp->status);
1810 	bytes_read = le16toh(nvm_resp->length);
1811 	offset_read = le16toh(nvm_resp->offset);
1812 	resp_data = nvm_resp->data;
1813 	if (ret) {
1814 		if ((offset != 0) &&
1815 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1816 			/*
1817 			 * meaning of NOT_VALID_ADDRESS:
1818 			 * driver try to read chunk from address that is
1819 			 * multiple of 2K and got an error since addr is empty.
1820 			 * meaning of (offset != 0): driver already
1821 			 * read valid data from another chunk so this case
1822 			 * is not an error.
1823 			 */
1824 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1826 				    offset);
1827 			*len = 0;
1828 			ret = 0;
1829 		} else {
1830 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 				    "NVM access command failed with status %d\n", ret);
1832 			ret = EIO;
1833 		}
1834 		goto exit;
1835 	}
1836 
1837 	if (offset_read != offset) {
1838 		device_printf(sc->sc_dev,
1839 		    "NVM ACCESS response with invalid offset %d\n",
1840 		    offset_read);
1841 		ret = EINVAL;
1842 		goto exit;
1843 	}
1844 
1845 	if (bytes_read > length) {
1846 		device_printf(sc->sc_dev,
1847 		    "NVM ACCESS response with too much data "
1848 		    "(%d bytes requested, %d bytes received)\n",
1849 		    length, bytes_read);
1850 		ret = EINVAL;
1851 		goto exit;
1852 	}
1853 
1854 	/* Write data to NVM */
1855 	memcpy(data + offset, resp_data, bytes_read);
1856 	*len = bytes_read;
1857 
1858  exit:
1859 	iwm_free_resp(sc, &cmd);
1860 	return ret;
1861 }
1862 
1863 /*
1864  * Reads an NVM section completely.
1865  * NICs prior to 7000 family don't have a real NVM, but just read
1866  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1867  * by uCode, we need to manually check in this case that we don't
1868  * overflow and try to read more than the EEPROM size.
1869  * For 7000 family NICs, we supply the maximal size we can read, and
1870  * the uCode fills the response with as much data as we can,
1871  * without overflowing, so no check is needed.
1872  */
1873 static int
1874 iwm_nvm_read_section(struct iwm_softc *sc,
1875 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1876 {
1877 	uint16_t seglen, length, offset = 0;
1878 	int ret;
1879 
1880 	/* Set nvm section read length */
1881 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1882 
1883 	seglen = length;
1884 
1885 	/* Read the NVM until exhausted (reading less than requested) */
1886 	while (seglen == length) {
1887 		/* Check no memory assumptions fail and cause an overflow */
1888 		if ((size_read + offset + length) >
1889 		    sc->cfg->eeprom_size) {
1890 			device_printf(sc->sc_dev,
1891 			    "EEPROM size is too small for NVM\n");
1892 			return ENOBUFS;
1893 		}
1894 
1895 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1896 		if (ret) {
1897 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1898 				    "Cannot read NVM from section %d offset %d, length %d\n",
1899 				    section, offset, length);
1900 			return ret;
1901 		}
1902 		offset += seglen;
1903 	}
1904 
1905 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1906 		    "NVM section %d read completed\n", section);
1907 	*len = offset;
1908 	return 0;
1909 }
1910 
1911 /*
1912  * BEGIN IWM_NVM_PARSE
1913  */
1914 
1915 /* iwlwifi/iwl-nvm-parse.c */
1916 
1917 /* NVM offsets (in words) definitions */
1918 enum iwm_nvm_offsets {
1919 	/* NVM HW-Section offset (in words) definitions */
1920 	IWM_HW_ADDR = 0x15,
1921 
1922 /* NVM SW-Section offset (in words) definitions */
1923 	IWM_NVM_SW_SECTION = 0x1C0,
1924 	IWM_NVM_VERSION = 0,
1925 	IWM_RADIO_CFG = 1,
1926 	IWM_SKU = 2,
1927 	IWM_N_HW_ADDRS = 3,
1928 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1929 
1930 /* NVM calibration section offset (in words) definitions */
1931 	IWM_NVM_CALIB_SECTION = 0x2B8,
1932 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1933 };
1934 
1935 enum iwm_8000_nvm_offsets {
1936 	/* NVM HW-Section offset (in words) definitions */
1937 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1938 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1939 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1940 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1941 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1942 
1943 	/* NVM SW-Section offset (in words) definitions */
1944 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1945 	IWM_NVM_VERSION_8000 = 0,
1946 	IWM_RADIO_CFG_8000 = 0,
1947 	IWM_SKU_8000 = 2,
1948 	IWM_N_HW_ADDRS_8000 = 3,
1949 
1950 	/* NVM REGULATORY -Section offset (in words) definitions */
1951 	IWM_NVM_CHANNELS_8000 = 0,
1952 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1953 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1954 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1955 
1956 	/* NVM calibration section offset (in words) definitions */
1957 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1958 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1959 };
1960 
1961 /* SKU Capabilities (actual values from NVM definition) */
1962 enum nvm_sku_bits {
1963 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1964 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1965 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1966 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1967 };
1968 
1969 /* radio config bits (actual values from NVM definition) */
1970 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1971 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1972 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1973 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1974 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1975 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1976 
1977 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1978 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1979 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1980 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1981 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1982 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1983 
1984 /**
1985  * enum iwm_nvm_channel_flags - channel flags in NVM
1986  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1987  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1988  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1989  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1990  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1991  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1992  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1993  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1994  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1995  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1996  */
1997 enum iwm_nvm_channel_flags {
1998 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1999 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2000 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2001 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2002 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2003 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2004 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2005 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2006 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2007 };
2008 
2009 /*
2010  * Translate EEPROM flags to net80211.
2011  */
2012 static uint32_t
2013 iwm_eeprom_channel_flags(uint16_t ch_flags)
2014 {
2015 	uint32_t nflags;
2016 
2017 	nflags = 0;
2018 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2019 		nflags |= IEEE80211_CHAN_PASSIVE;
2020 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2021 		nflags |= IEEE80211_CHAN_NOADHOC;
2022 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2023 		nflags |= IEEE80211_CHAN_DFS;
2024 		/* Just in case. */
2025 		nflags |= IEEE80211_CHAN_NOADHOC;
2026 	}
2027 
2028 	return (nflags);
2029 }
2030 
2031 static void
2032 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2033     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2034     const uint8_t bands[])
2035 {
2036 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2037 	uint32_t nflags;
2038 	uint16_t ch_flags;
2039 	uint8_t ieee;
2040 	int error;
2041 
2042 	for (; ch_idx < ch_num; ch_idx++) {
2043 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2044 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2045 			ieee = iwm_nvm_channels[ch_idx];
2046 		else
2047 			ieee = iwm_nvm_channels_8000[ch_idx];
2048 
2049 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2050 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2051 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2052 			    ieee, ch_flags,
2053 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2054 			    "5.2" : "2.4");
2055 			continue;
2056 		}
2057 
2058 		nflags = iwm_eeprom_channel_flags(ch_flags);
2059 		error = ieee80211_add_channel(chans, maxchans, nchans,
2060 		    ieee, 0, 0, nflags, bands);
2061 		if (error != 0)
2062 			break;
2063 
2064 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2065 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2066 		    ieee, ch_flags,
2067 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2068 		    "5.2" : "2.4");
2069 	}
2070 }
2071 
2072 static void
2073 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2074     struct ieee80211_channel chans[])
2075 {
2076 	struct iwm_softc *sc = ic->ic_softc;
2077 	struct iwm_nvm_data *data = sc->nvm_data;
2078 	uint8_t bands[IEEE80211_MODE_BYTES];
2079 	size_t ch_num;
2080 
2081 	memset(bands, 0, sizeof(bands));
2082 	/* 1-13: 11b/g channels. */
2083 	setbit(bands, IEEE80211_MODE_11B);
2084 	setbit(bands, IEEE80211_MODE_11G);
2085 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2086 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2087 
2088 	/* 14: 11b channel only. */
2089 	clrbit(bands, IEEE80211_MODE_11G);
2090 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2091 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2092 
2093 	if (data->sku_cap_band_52GHz_enable) {
2094 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2095 			ch_num = nitems(iwm_nvm_channels);
2096 		else
2097 			ch_num = nitems(iwm_nvm_channels_8000);
2098 		memset(bands, 0, sizeof(bands));
2099 		setbit(bands, IEEE80211_MODE_11A);
2100 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2101 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2102 	}
2103 }
2104 
2105 static void
2106 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2107 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2108 {
2109 	const uint8_t *hw_addr;
2110 
2111 	if (mac_override) {
2112 		static const uint8_t reserved_mac[] = {
2113 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2114 		};
2115 
2116 		hw_addr = (const uint8_t *)(mac_override +
2117 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2118 
2119 		/*
2120 		 * Store the MAC address from MAO section.
2121 		 * No byte swapping is required in MAO section
2122 		 */
2123 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2124 
2125 		/*
2126 		 * Force the use of the OTP MAC address in case of reserved MAC
2127 		 * address in the NVM, or if address is given but invalid.
2128 		 */
2129 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2130 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2131 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2132 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2133 			return;
2134 
2135 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2136 		    "%s: mac address from nvm override section invalid\n",
2137 		    __func__);
2138 	}
2139 
2140 	if (nvm_hw) {
2141 		/* read the mac address from WFMP registers */
2142 		uint32_t mac_addr0 =
2143 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2144 		uint32_t mac_addr1 =
2145 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2146 
2147 		hw_addr = (const uint8_t *)&mac_addr0;
2148 		data->hw_addr[0] = hw_addr[3];
2149 		data->hw_addr[1] = hw_addr[2];
2150 		data->hw_addr[2] = hw_addr[1];
2151 		data->hw_addr[3] = hw_addr[0];
2152 
2153 		hw_addr = (const uint8_t *)&mac_addr1;
2154 		data->hw_addr[4] = hw_addr[1];
2155 		data->hw_addr[5] = hw_addr[0];
2156 
2157 		return;
2158 	}
2159 
2160 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2161 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2162 }
2163 
2164 static int
2165 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2166 	    const uint16_t *phy_sku)
2167 {
2168 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2169 		return le16_to_cpup(nvm_sw + IWM_SKU);
2170 
2171 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2172 }
2173 
2174 static int
2175 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2176 {
2177 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2178 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2179 	else
2180 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2181 						IWM_NVM_VERSION_8000));
2182 }
2183 
2184 static int
2185 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2186 		  const uint16_t *phy_sku)
2187 {
2188         if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2189                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2190 
2191         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2192 }
2193 
2194 static int
2195 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2196 {
2197 	int n_hw_addr;
2198 
2199 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2200 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2201 
2202 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2203 
2204         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2205 }
2206 
2207 static void
2208 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2209 		  uint32_t radio_cfg)
2210 {
2211 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2212 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2213 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2214 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2215 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2216 		return;
2217 	}
2218 
2219 	/* set the radio configuration for family 8000 */
2220 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2221 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2222 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2223 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2224 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2225 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2226 }
2227 
2228 static int
2229 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2230 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2231 {
2232 #ifdef notyet /* for FAMILY 9000 */
2233 	if (cfg->mac_addr_from_csr) {
2234 		iwm_set_hw_address_from_csr(sc, data);
2235         } else
2236 #endif
2237 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2238 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2239 
2240 		/* The byte order is little endian 16 bit, meaning 214365 */
2241 		data->hw_addr[0] = hw_addr[1];
2242 		data->hw_addr[1] = hw_addr[0];
2243 		data->hw_addr[2] = hw_addr[3];
2244 		data->hw_addr[3] = hw_addr[2];
2245 		data->hw_addr[4] = hw_addr[5];
2246 		data->hw_addr[5] = hw_addr[4];
2247 	} else {
2248 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2249 	}
2250 
2251 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2252 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2253 		return EINVAL;
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 static struct iwm_nvm_data *
2260 iwm_parse_nvm_data(struct iwm_softc *sc,
2261 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2262 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2263 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2264 {
2265 	struct iwm_nvm_data *data;
2266 	uint32_t sku, radio_cfg;
2267 	uint16_t lar_config;
2268 
2269 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2270 		data = malloc(sizeof(*data) +
2271 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2272 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2273 	} else {
2274 		data = malloc(sizeof(*data) +
2275 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2276 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2277 	}
2278 	if (!data)
2279 		return NULL;
2280 
2281 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2282 
2283 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2284 	iwm_set_radio_cfg(sc, data, radio_cfg);
2285 
2286 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2287 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2288 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2289 	data->sku_cap_11n_enable = 0;
2290 
2291 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2292 
2293 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2294 		/* TODO: use IWL_NVM_EXT */
2295 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2296 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2297 				       IWM_NVM_LAR_OFFSET_8000;
2298 
2299 		lar_config = le16_to_cpup(regulatory + lar_offset);
2300 		data->lar_enabled = !!(lar_config &
2301 				       IWM_NVM_LAR_ENABLED_8000);
2302 	}
2303 
2304 	/* If no valid mac address was found - bail out */
2305 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2306 		free(data, M_DEVBUF);
2307 		return NULL;
2308 	}
2309 
2310 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2311 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2312 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2313 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2314 	} else {
2315 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2316 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2317 	}
2318 
2319 	return data;
2320 }
2321 
2322 static void
2323 iwm_free_nvm_data(struct iwm_nvm_data *data)
2324 {
2325 	if (data != NULL)
2326 		free(data, M_DEVBUF);
2327 }
2328 
2329 static struct iwm_nvm_data *
2330 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2331 {
2332 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2333 
2334 	/* Checking for required sections */
2335 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2336 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2337 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2338 			device_printf(sc->sc_dev,
2339 			    "Can't parse empty OTP/NVM sections\n");
2340 			return NULL;
2341 		}
2342 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2343 		/* SW and REGULATORY sections are mandatory */
2344 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2345 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2346 			device_printf(sc->sc_dev,
2347 			    "Can't parse empty OTP/NVM sections\n");
2348 			return NULL;
2349 		}
2350 		/* MAC_OVERRIDE or at least HW section must exist */
2351 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2352 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2353 			device_printf(sc->sc_dev,
2354 			    "Can't parse mac_address, empty sections\n");
2355 			return NULL;
2356 		}
2357 
2358 		/* PHY_SKU section is mandatory in B0 */
2359 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2360 			device_printf(sc->sc_dev,
2361 			    "Can't parse phy_sku in B0, empty sections\n");
2362 			return NULL;
2363 		}
2364 	} else {
2365 		panic("unknown device family %d\n", sc->cfg->device_family);
2366 	}
2367 
2368 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2369 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2370 	calib = (const uint16_t *)
2371 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2372 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2373 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2374 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2375 	mac_override = (const uint16_t *)
2376 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2377 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2378 
2379 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2380 	    phy_sku, regulatory);
2381 }
2382 
2383 static int
2384 iwm_nvm_init(struct iwm_softc *sc)
2385 {
2386 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2387 	int i, ret, section;
2388 	uint32_t size_read = 0;
2389 	uint8_t *nvm_buffer, *temp;
2390 	uint16_t len;
2391 
2392 	memset(nvm_sections, 0, sizeof(nvm_sections));
2393 
2394 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2395 		return EINVAL;
2396 
2397 	/* load NVM values from nic */
2398 	/* Read From FW NVM */
2399 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2400 
2401 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2402 	if (!nvm_buffer)
2403 		return ENOMEM;
2404 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2405 		/* we override the constness for initial read */
2406 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2407 					   &len, size_read);
2408 		if (ret)
2409 			continue;
2410 		size_read += len;
2411 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2412 		if (!temp) {
2413 			ret = ENOMEM;
2414 			break;
2415 		}
2416 		memcpy(temp, nvm_buffer, len);
2417 
2418 		nvm_sections[section].data = temp;
2419 		nvm_sections[section].length = len;
2420 	}
2421 	if (!size_read)
2422 		device_printf(sc->sc_dev, "OTP is blank\n");
2423 	free(nvm_buffer, M_DEVBUF);
2424 
2425 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2426 	if (!sc->nvm_data)
2427 		return EINVAL;
2428 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2429 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2430 
2431 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2432 		if (nvm_sections[i].data != NULL)
2433 			free(nvm_sections[i].data, M_DEVBUF);
2434 	}
2435 
2436 	return 0;
2437 }
2438 
2439 static int
2440 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2441 	const struct iwm_fw_desc *section)
2442 {
2443 	struct iwm_dma_info *dma = &sc->fw_dma;
2444 	uint8_t *v_addr;
2445 	bus_addr_t p_addr;
2446 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2447 	int ret = 0;
2448 
2449 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2450 		    "%s: [%d] uCode section being loaded...\n",
2451 		    __func__, section_num);
2452 
2453 	v_addr = dma->vaddr;
2454 	p_addr = dma->paddr;
2455 
2456 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2457 		uint32_t copy_size, dst_addr;
2458 		int extended_addr = FALSE;
2459 
2460 		copy_size = MIN(chunk_sz, section->len - offset);
2461 		dst_addr = section->offset + offset;
2462 
2463 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2464 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2465 			extended_addr = TRUE;
2466 
2467 		if (extended_addr)
2468 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2469 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2470 
2471 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2472 		    copy_size);
2473 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2474 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2475 						   copy_size);
2476 
2477 		if (extended_addr)
2478 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2479 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2480 
2481 		if (ret) {
2482 			device_printf(sc->sc_dev,
2483 			    "%s: Could not load the [%d] uCode section\n",
2484 			    __func__, section_num);
2485 			break;
2486 		}
2487 	}
2488 
2489 	return ret;
2490 }
2491 
2492 /*
2493  * ucode
2494  */
2495 static int
2496 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2497 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2498 {
2499 	sc->sc_fw_chunk_done = 0;
2500 
2501 	if (!iwm_nic_lock(sc))
2502 		return EBUSY;
2503 
2504 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2505 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2506 
2507 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2508 	    dst_addr);
2509 
2510 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2511 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2512 
2513 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2514 	    (iwm_get_dma_hi_addr(phy_addr)
2515 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2516 
2517 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2518 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2519 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2520 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2521 
2522 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2523 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2524 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2525 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2526 
2527 	iwm_nic_unlock(sc);
2528 
2529 	/* wait up to 5s for this segment to load */
2530 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2531 
2532 	if (!sc->sc_fw_chunk_done) {
2533 		device_printf(sc->sc_dev,
2534 		    "fw chunk addr 0x%x len %d failed to load\n",
2535 		    dst_addr, byte_cnt);
2536 		return ETIMEDOUT;
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 static int
2543 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2544 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2545 {
2546 	int shift_param;
2547 	int i, ret = 0, sec_num = 0x1;
2548 	uint32_t val, last_read_idx = 0;
2549 
2550 	if (cpu == 1) {
2551 		shift_param = 0;
2552 		*first_ucode_section = 0;
2553 	} else {
2554 		shift_param = 16;
2555 		(*first_ucode_section)++;
2556 	}
2557 
2558 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2559 		last_read_idx = i;
2560 
2561 		/*
2562 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2563 		 * CPU1 to CPU2.
2564 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2565 		 * CPU2 non paged to CPU2 paging sec.
2566 		 */
2567 		if (!image->sec[i].data ||
2568 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2569 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2570 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2571 				    "Break since Data not valid or Empty section, sec = %d\n",
2572 				    i);
2573 			break;
2574 		}
2575 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2576 		if (ret)
2577 			return ret;
2578 
2579 		/* Notify the ucode of the loaded section number and status */
2580 		if (iwm_nic_lock(sc)) {
2581 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2582 			val = val | (sec_num << shift_param);
2583 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2584 			sec_num = (sec_num << 1) | 0x1;
2585 			iwm_nic_unlock(sc);
2586 		}
2587 	}
2588 
2589 	*first_ucode_section = last_read_idx;
2590 
2591 	iwm_enable_interrupts(sc);
2592 
2593 	if (iwm_nic_lock(sc)) {
2594 		if (cpu == 1)
2595 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2596 		else
2597 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2598 		iwm_nic_unlock(sc);
2599 	}
2600 
2601 	return 0;
2602 }
2603 
2604 static int
2605 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2606 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2607 {
2608 	int shift_param;
2609 	int i, ret = 0;
2610 	uint32_t last_read_idx = 0;
2611 
2612 	if (cpu == 1) {
2613 		shift_param = 0;
2614 		*first_ucode_section = 0;
2615 	} else {
2616 		shift_param = 16;
2617 		(*first_ucode_section)++;
2618 	}
2619 
2620 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2621 		last_read_idx = i;
2622 
2623 		/*
2624 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2625 		 * CPU1 to CPU2.
2626 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2627 		 * CPU2 non paged to CPU2 paging sec.
2628 		 */
2629 		if (!image->sec[i].data ||
2630 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2631 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2632 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2633 				    "Break since Data not valid or Empty section, sec = %d\n",
2634 				     i);
2635 			break;
2636 		}
2637 
2638 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2639 		if (ret)
2640 			return ret;
2641 	}
2642 
2643 	*first_ucode_section = last_read_idx;
2644 
2645 	return 0;
2646 
2647 }
2648 
2649 static int
2650 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2651 {
2652 	int ret = 0;
2653 	int first_ucode_section;
2654 
2655 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2656 		     image->is_dual_cpus ? "Dual" : "Single");
2657 
2658 	/* load to FW the binary non secured sections of CPU1 */
2659 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2660 	if (ret)
2661 		return ret;
2662 
2663 	if (image->is_dual_cpus) {
2664 		/* set CPU2 header address */
2665 		if (iwm_nic_lock(sc)) {
2666 			iwm_write_prph(sc,
2667 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2668 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2669 			iwm_nic_unlock(sc);
2670 		}
2671 
2672 		/* load to FW the binary sections of CPU2 */
2673 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2674 						 &first_ucode_section);
2675 		if (ret)
2676 			return ret;
2677 	}
2678 
2679 	iwm_enable_interrupts(sc);
2680 
2681 	/* release CPU reset */
2682 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2683 
2684 	return 0;
2685 }
2686 
2687 int
2688 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2689 	const struct iwm_fw_img *image)
2690 {
2691 	int ret = 0;
2692 	int first_ucode_section;
2693 
2694 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2695 		    image->is_dual_cpus ? "Dual" : "Single");
2696 
2697 	/* configure the ucode to be ready to get the secured image */
2698 	/* release CPU reset */
2699 	if (iwm_nic_lock(sc)) {
2700 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2701 		    IWM_RELEASE_CPU_RESET_BIT);
2702 		iwm_nic_unlock(sc);
2703 	}
2704 
2705 	/* load to FW the binary Secured sections of CPU1 */
2706 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2707 	    &first_ucode_section);
2708 	if (ret)
2709 		return ret;
2710 
2711 	/* load to FW the binary sections of CPU2 */
2712 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2713 	    &first_ucode_section);
2714 }
2715 
2716 /* XXX Get rid of this definition */
2717 static inline void
2718 iwm_enable_fw_load_int(struct iwm_softc *sc)
2719 {
2720 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2721 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2722 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2723 }
2724 
2725 /* XXX Add proper rfkill support code */
2726 static int
2727 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2728 {
2729 	int ret;
2730 
2731 	/* This may fail if AMT took ownership of the device */
2732 	if (iwm_prepare_card_hw(sc)) {
2733 		device_printf(sc->sc_dev,
2734 		    "%s: Exit HW not ready\n", __func__);
2735 		ret = EIO;
2736 		goto out;
2737 	}
2738 
2739 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2740 
2741 	iwm_disable_interrupts(sc);
2742 
2743 	/* make sure rfkill handshake bits are cleared */
2744 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2745 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2746 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2747 
2748 	/* clear (again), then enable host interrupts */
2749 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2750 
2751 	ret = iwm_nic_init(sc);
2752 	if (ret) {
2753 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2754 		goto out;
2755 	}
2756 
2757 	/*
2758 	 * Now, we load the firmware and don't want to be interrupted, even
2759 	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2760 	 * FH_TX interrupt which is needed to load the firmware). If the
2761 	 * RF-Kill switch is toggled, we will find out after having loaded
2762 	 * the firmware and return the proper value to the caller.
2763 	 */
2764 	iwm_enable_fw_load_int(sc);
2765 
2766 	/* really make sure rfkill handshake bits are cleared */
2767 	/* maybe we should write a few times more?  just to make sure */
2768 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2769 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2770 
2771 	/* Load the given image to the HW */
2772 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2773 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2774 	else
2775 		ret = iwm_pcie_load_given_ucode(sc, fw);
2776 
2777 	/* XXX re-check RF-Kill state */
2778 
2779 out:
2780 	return ret;
2781 }
2782 
2783 static int
2784 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2785 {
2786 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2787 		.valid = htole32(valid_tx_ant),
2788 	};
2789 
2790 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2791 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2792 }
2793 
2794 /* iwlwifi: mvm/fw.c */
2795 static int
2796 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2797 {
2798 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2799 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2800 
2801 	/* Set parameters */
2802 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2803 	phy_cfg_cmd.calib_control.event_trigger =
2804 	    sc->sc_default_calib[ucode_type].event_trigger;
2805 	phy_cfg_cmd.calib_control.flow_trigger =
2806 	    sc->sc_default_calib[ucode_type].flow_trigger;
2807 
2808 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2809 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2810 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2811 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2812 }
2813 
2814 static int
2815 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2816 {
2817 	struct iwm_alive_data *alive_data = data;
2818 	struct iwm_alive_resp_v3 *palive3;
2819 	struct iwm_alive_resp *palive;
2820 	struct iwm_umac_alive *umac;
2821 	struct iwm_lmac_alive *lmac1;
2822 	struct iwm_lmac_alive *lmac2 = NULL;
2823 	uint16_t status;
2824 
2825 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2826 		palive = (void *)pkt->data;
2827 		umac = &palive->umac_data;
2828 		lmac1 = &palive->lmac_data[0];
2829 		lmac2 = &palive->lmac_data[1];
2830 		status = le16toh(palive->status);
2831 	} else {
2832 		palive3 = (void *)pkt->data;
2833 		umac = &palive3->umac_data;
2834 		lmac1 = &palive3->lmac_data;
2835 		status = le16toh(palive3->status);
2836 	}
2837 
2838 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2839 	if (lmac2)
2840 		sc->error_event_table[1] =
2841 			le32toh(lmac2->error_event_table_ptr);
2842 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2843 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2844 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2845 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2846 	if (sc->umac_error_event_table)
2847 		sc->support_umac_log = TRUE;
2848 
2849 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2850 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2851 		    status, lmac1->ver_type, lmac1->ver_subtype);
2852 
2853 	if (lmac2)
2854 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2855 
2856 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2857 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2858 		    le32toh(umac->umac_major),
2859 		    le32toh(umac->umac_minor));
2860 
2861 	return TRUE;
2862 }
2863 
2864 static int
2865 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2866 	struct iwm_rx_packet *pkt, void *data)
2867 {
2868 	struct iwm_phy_db *phy_db = data;
2869 
2870 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2871 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2872 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2873 			    __func__, pkt->hdr.code);
2874 		}
2875 		return TRUE;
2876 	}
2877 
2878 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2879 		device_printf(sc->sc_dev,
2880 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2881 	}
2882 
2883 	return FALSE;
2884 }
2885 
2886 static int
2887 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2888 	enum iwm_ucode_type ucode_type)
2889 {
2890 	struct iwm_notification_wait alive_wait;
2891 	struct iwm_alive_data alive_data;
2892 	const struct iwm_fw_img *fw;
2893 	enum iwm_ucode_type old_type = sc->cur_ucode;
2894 	int error;
2895 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2896 
2897 	fw = &sc->sc_fw.img[ucode_type];
2898 	sc->cur_ucode = ucode_type;
2899 	sc->ucode_loaded = FALSE;
2900 
2901 	memset(&alive_data, 0, sizeof(alive_data));
2902 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2903 				   alive_cmd, nitems(alive_cmd),
2904 				   iwm_alive_fn, &alive_data);
2905 
2906 	error = iwm_start_fw(sc, fw);
2907 	if (error) {
2908 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2909 		sc->cur_ucode = old_type;
2910 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2911 		return error;
2912 	}
2913 
2914 	/*
2915 	 * Some things may run in the background now, but we
2916 	 * just wait for the ALIVE notification here.
2917 	 */
2918 	IWM_UNLOCK(sc);
2919 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2920 				      IWM_UCODE_ALIVE_TIMEOUT);
2921 	IWM_LOCK(sc);
2922 	if (error) {
2923 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2924 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2925 			if (iwm_nic_lock(sc)) {
2926 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2927 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2928 				iwm_nic_unlock(sc);
2929 			}
2930 			device_printf(sc->sc_dev,
2931 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2932 			    a, b);
2933 		}
2934 		sc->cur_ucode = old_type;
2935 		return error;
2936 	}
2937 
2938 	if (!alive_data.valid) {
2939 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2940 		    __func__);
2941 		sc->cur_ucode = old_type;
2942 		return EIO;
2943 	}
2944 
2945 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2946 
2947 	/*
2948 	 * configure and operate fw paging mechanism.
2949 	 * driver configures the paging flow only once, CPU2 paging image
2950 	 * included in the IWM_UCODE_INIT image.
2951 	 */
2952 	if (fw->paging_mem_size) {
2953 		error = iwm_save_fw_paging(sc, fw);
2954 		if (error) {
2955 			device_printf(sc->sc_dev,
2956 			    "%s: failed to save the FW paging image\n",
2957 			    __func__);
2958 			return error;
2959 		}
2960 
2961 		error = iwm_send_paging_cmd(sc, fw);
2962 		if (error) {
2963 			device_printf(sc->sc_dev,
2964 			    "%s: failed to send the paging cmd\n", __func__);
2965 			iwm_free_fw_paging(sc);
2966 			return error;
2967 		}
2968 	}
2969 
2970 	if (!error)
2971 		sc->ucode_loaded = TRUE;
2972 	return error;
2973 }
2974 
2975 /*
2976  * mvm misc bits
2977  */
2978 
2979 /*
2980  * follows iwlwifi/fw.c
2981  */
2982 static int
2983 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2984 {
2985 	struct iwm_notification_wait calib_wait;
2986 	static const uint16_t init_complete[] = {
2987 		IWM_INIT_COMPLETE_NOTIF,
2988 		IWM_CALIB_RES_NOTIF_PHY_DB
2989 	};
2990 	int ret;
2991 
2992 	/* do not operate with rfkill switch turned on */
2993 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2994 		device_printf(sc->sc_dev,
2995 		    "radio is disabled by hardware switch\n");
2996 		return EPERM;
2997 	}
2998 
2999 	iwm_init_notification_wait(sc->sc_notif_wait,
3000 				   &calib_wait,
3001 				   init_complete,
3002 				   nitems(init_complete),
3003 				   iwm_wait_phy_db_entry,
3004 				   sc->sc_phy_db);
3005 
3006 	/* Will also start the device */
3007 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3008 	if (ret) {
3009 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3010 		    ret);
3011 		goto error;
3012 	}
3013 
3014 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3015 		ret = iwm_send_bt_init_conf(sc);
3016 		if (ret) {
3017 			device_printf(sc->sc_dev,
3018 			    "failed to send bt coex configuration: %d\n", ret);
3019 			goto error;
3020 		}
3021 	}
3022 
3023 	if (justnvm) {
3024 		/* Read nvm */
3025 		ret = iwm_nvm_init(sc);
3026 		if (ret) {
3027 			device_printf(sc->sc_dev, "failed to read nvm\n");
3028 			goto error;
3029 		}
3030 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3031 		goto error;
3032 	}
3033 
3034 	/* Send TX valid antennas before triggering calibrations */
3035 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3036 	if (ret) {
3037 		device_printf(sc->sc_dev,
3038 		    "failed to send antennas before calibration: %d\n", ret);
3039 		goto error;
3040 	}
3041 
3042 	/*
3043 	 * Send phy configurations command to init uCode
3044 	 * to start the 16.0 uCode init image internal calibrations.
3045 	 */
3046 	ret = iwm_send_phy_cfg_cmd(sc);
3047 	if (ret) {
3048 		device_printf(sc->sc_dev,
3049 		    "%s: Failed to run INIT calibrations: %d\n",
3050 		    __func__, ret);
3051 		goto error;
3052 	}
3053 
3054 	/*
3055 	 * Nothing to do but wait for the init complete notification
3056 	 * from the firmware.
3057 	 */
3058 	IWM_UNLOCK(sc);
3059 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3060 	    IWM_UCODE_CALIB_TIMEOUT);
3061 	IWM_LOCK(sc);
3062 
3063 
3064 	goto out;
3065 
3066 error:
3067 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3068 out:
3069 	return ret;
3070 }
3071 
3072 static int
3073 iwm_config_ltr(struct iwm_softc *sc)
3074 {
3075 	struct iwm_ltr_config_cmd cmd = {
3076 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3077 	};
3078 
3079 	if (!sc->sc_ltr_enabled)
3080 		return 0;
3081 
3082 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3083 }
3084 
3085 /*
3086  * receive side
3087  */
3088 
3089 /* (re)stock rx ring, called at init-time and at runtime */
3090 static int
3091 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3092 {
3093 	struct iwm_rx_ring *ring = &sc->rxq;
3094 	struct iwm_rx_data *data = &ring->data[idx];
3095 	struct mbuf *m;
3096 	bus_dmamap_t dmamap;
3097 	bus_dma_segment_t seg;
3098 	int nsegs, error;
3099 
3100 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3101 	if (m == NULL)
3102 		return ENOBUFS;
3103 
3104 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3105 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3106 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3107 	if (error != 0) {
3108 		device_printf(sc->sc_dev,
3109 		    "%s: can't map mbuf, error %d\n", __func__, error);
3110 		m_freem(m);
3111 		return error;
3112 	}
3113 
3114 	if (data->m != NULL)
3115 		bus_dmamap_unload(ring->data_dmat, data->map);
3116 
3117 	/* Swap ring->spare_map with data->map */
3118 	dmamap = data->map;
3119 	data->map = ring->spare_map;
3120 	ring->spare_map = dmamap;
3121 
3122 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3123 	data->m = m;
3124 
3125 	/* Update RX descriptor. */
3126 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3127 	if (sc->cfg->mqrx_supported)
3128 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3129 	else
3130 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3131 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3132 	    BUS_DMASYNC_PREWRITE);
3133 
3134 	return 0;
3135 }
3136 
3137 static void
3138 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3139 {
3140 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3141 
3142 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3143 
3144 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3145 }
3146 
3147 /*
3148  * Retrieve the average noise (in dBm) among receivers.
3149  */
3150 static int
3151 iwm_get_noise(struct iwm_softc *sc,
3152     const struct iwm_statistics_rx_non_phy *stats)
3153 {
3154 	int i, total, nbant, noise;
3155 
3156 	total = nbant = noise = 0;
3157 	for (i = 0; i < 3; i++) {
3158 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3159 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3160 		    __func__,
3161 		    i,
3162 		    noise);
3163 
3164 		if (noise) {
3165 			total += noise;
3166 			nbant++;
3167 		}
3168 	}
3169 
3170 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3171 	    __func__, nbant, total);
3172 #if 0
3173 	/* There should be at least one antenna but check anyway. */
3174 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3175 #else
3176 	/* For now, just hard-code it to -96 to be safe */
3177 	return (-96);
3178 #endif
3179 }
3180 
3181 static void
3182 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3183 {
3184 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3185 
3186 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3187 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3188 }
3189 
3190 /* iwlwifi: mvm/rx.c */
3191 /*
3192  * iwm_get_signal_strength - use new rx PHY INFO API
3193  * values are reported by the fw as positive values - need to negate
3194  * to obtain their dBM.  Account for missing antennas by replacing 0
3195  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3196  */
3197 static int
3198 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3199     struct iwm_rx_phy_info *phy_info)
3200 {
3201 	int energy_a, energy_b, energy_c, max_energy;
3202 	uint32_t val;
3203 
3204 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3205 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3206 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3207 	energy_a = energy_a ? -energy_a : -256;
3208 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3209 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3210 	energy_b = energy_b ? -energy_b : -256;
3211 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3212 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3213 	energy_c = energy_c ? -energy_c : -256;
3214 	max_energy = MAX(energy_a, energy_b);
3215 	max_energy = MAX(max_energy, energy_c);
3216 
3217 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218 	    "energy In A %d B %d C %d , and max %d\n",
3219 	    energy_a, energy_b, energy_c, max_energy);
3220 
3221 	return max_energy;
3222 }
3223 
3224 static int
3225 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3226     struct iwm_rx_mpdu_desc *desc)
3227 {
3228 	int energy_a, energy_b;
3229 
3230 	energy_a = desc->v1.energy_a;
3231 	energy_b = desc->v1.energy_b;
3232 	energy_a = energy_a ? -energy_a : -256;
3233 	energy_b = energy_b ? -energy_b : -256;
3234 	return MAX(energy_a, energy_b);
3235 }
3236 
3237 /*
3238  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3239  *
3240  * Handles the actual data of the Rx packet from the fw
3241  */
3242 static bool
3243 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3244     bool stolen)
3245 {
3246 	struct ieee80211com *ic = &sc->sc_ic;
3247 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3248 	struct ieee80211_frame *wh;
3249 	struct ieee80211_rx_stats rxs;
3250 	struct iwm_rx_phy_info *phy_info;
3251 	struct iwm_rx_mpdu_res_start *rx_res;
3252 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3253 	uint32_t len;
3254 	uint32_t rx_pkt_status;
3255 	int rssi;
3256 
3257 	phy_info = &sc->sc_last_phy_info;
3258 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3259 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3260 	len = le16toh(rx_res->byte_count);
3261 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3262 
3263 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3264 		device_printf(sc->sc_dev,
3265 		    "dsp size out of range [0,20]: %d\n",
3266 		    phy_info->cfg_phy_cnt);
3267 		return false;
3268 	}
3269 
3270 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3271 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3272 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3273 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3274 		return false;
3275 	}
3276 
3277 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3278 
3279 	/* Map it to relative value */
3280 	rssi = rssi - sc->sc_noise;
3281 
3282 	/* replenish ring for the buffer we're going to feed to the sharks */
3283 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3284 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3285 		    __func__);
3286 		return false;
3287 	}
3288 
3289 	m->m_data = pkt->data + sizeof(*rx_res);
3290 	m->m_pkthdr.len = m->m_len = len;
3291 
3292 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3293 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3294 
3295 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3296 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3297 	    __func__,
3298 	    le16toh(phy_info->channel),
3299 	    le16toh(phy_info->phy_flags));
3300 
3301 	/*
3302 	 * Populate an RX state struct with the provided information.
3303 	 */
3304 	bzero(&rxs, sizeof(rxs));
3305 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3306 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3307 	rxs.c_ieee = le16toh(phy_info->channel);
3308 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3309 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3310 	} else {
3311 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3312 	}
3313 
3314 	/* rssi is in 1/2db units */
3315 	rxs.c_rssi = rssi * 2;
3316 	rxs.c_nf = sc->sc_noise;
3317 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3318 		return false;
3319 
3320 	if (ieee80211_radiotap_active_vap(vap)) {
3321 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3322 
3323 		tap->wr_flags = 0;
3324 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3325 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3326 		tap->wr_chan_freq = htole16(rxs.c_freq);
3327 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3328 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3329 		tap->wr_dbm_antsignal = (int8_t)rssi;
3330 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3331 		tap->wr_tsft = phy_info->system_timestamp;
3332 		switch (phy_info->rate) {
3333 		/* CCK rates. */
3334 		case  10: tap->wr_rate =   2; break;
3335 		case  20: tap->wr_rate =   4; break;
3336 		case  55: tap->wr_rate =  11; break;
3337 		case 110: tap->wr_rate =  22; break;
3338 		/* OFDM rates. */
3339 		case 0xd: tap->wr_rate =  12; break;
3340 		case 0xf: tap->wr_rate =  18; break;
3341 		case 0x5: tap->wr_rate =  24; break;
3342 		case 0x7: tap->wr_rate =  36; break;
3343 		case 0x9: tap->wr_rate =  48; break;
3344 		case 0xb: tap->wr_rate =  72; break;
3345 		case 0x1: tap->wr_rate =  96; break;
3346 		case 0x3: tap->wr_rate = 108; break;
3347 		/* Unknown rate: should not happen. */
3348 		default:  tap->wr_rate =   0;
3349 		}
3350 	}
3351 
3352 	return true;
3353 }
3354 
3355 static bool
3356 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3357     bool stolen)
3358 {
3359 	struct ieee80211com *ic = &sc->sc_ic;
3360 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3361 	struct ieee80211_frame *wh;
3362 	struct ieee80211_rx_stats rxs;
3363 	struct iwm_rx_mpdu_desc *desc;
3364 	struct iwm_rx_packet *pkt;
3365 	int rssi;
3366 	uint32_t hdrlen, len, rate_n_flags;
3367 	uint16_t phy_info;
3368 	uint8_t channel;
3369 
3370 	pkt = mtodo(m, offset);
3371 	desc = (void *)pkt->data;
3372 
3373 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3374 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3375 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3376 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3377 		return false;
3378 	}
3379 
3380 	channel = desc->v1.channel;
3381 	len = le16toh(desc->mpdu_len);
3382 	phy_info = le16toh(desc->phy_info);
3383 	rate_n_flags = desc->v1.rate_n_flags;
3384 
3385 	wh = mtodo(m, sizeof(*desc));
3386 	m->m_data = pkt->data + sizeof(*desc);
3387 	m->m_pkthdr.len = m->m_len = len;
3388 	m->m_len = len;
3389 
3390 	/* Account for padding following the frame header. */
3391 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3392 		hdrlen = ieee80211_anyhdrsize(wh);
3393 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3394 		m->m_data = mtodo(m, 2);
3395 		wh = mtod(m, struct ieee80211_frame *);
3396 	}
3397 
3398 	/* Map it to relative value */
3399 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3400 	rssi = rssi - sc->sc_noise;
3401 
3402 	/* replenish ring for the buffer we're going to feed to the sharks */
3403 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3404 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3405 		    __func__);
3406 		return false;
3407 	}
3408 
3409 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3410 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3411 
3412 	/*
3413 	 * Populate an RX state struct with the provided information.
3414 	 */
3415 	bzero(&rxs, sizeof(rxs));
3416 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3417 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3418 	rxs.c_ieee = channel;
3419 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3420 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3421 
3422 	/* rssi is in 1/2db units */
3423 	rxs.c_rssi = rssi * 2;
3424 	rxs.c_nf = sc->sc_noise;
3425 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3426 		return false;
3427 
3428 	if (ieee80211_radiotap_active_vap(vap)) {
3429 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3430 
3431 		tap->wr_flags = 0;
3432 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3433 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3434 		tap->wr_chan_freq = htole16(rxs.c_freq);
3435 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3436 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3437 		tap->wr_dbm_antsignal = (int8_t)rssi;
3438 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3439 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3440 		switch ((rate_n_flags & 0xff)) {
3441 		/* CCK rates. */
3442 		case  10: tap->wr_rate =   2; break;
3443 		case  20: tap->wr_rate =   4; break;
3444 		case  55: tap->wr_rate =  11; break;
3445 		case 110: tap->wr_rate =  22; break;
3446 		/* OFDM rates. */
3447 		case 0xd: tap->wr_rate =  12; break;
3448 		case 0xf: tap->wr_rate =  18; break;
3449 		case 0x5: tap->wr_rate =  24; break;
3450 		case 0x7: tap->wr_rate =  36; break;
3451 		case 0x9: tap->wr_rate =  48; break;
3452 		case 0xb: tap->wr_rate =  72; break;
3453 		case 0x1: tap->wr_rate =  96; break;
3454 		case 0x3: tap->wr_rate = 108; break;
3455 		/* Unknown rate: should not happen. */
3456 		default:  tap->wr_rate =   0;
3457 		}
3458 	}
3459 
3460 	return true;
3461 }
3462 
3463 static bool
3464 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3465     bool stolen)
3466 {
3467   	struct epoch_tracker et;
3468 	struct ieee80211com *ic;
3469 	struct ieee80211_frame *wh;
3470 	struct ieee80211_node *ni;
3471 	bool ret;
3472 
3473 	ic = &sc->sc_ic;
3474 
3475 	ret = sc->cfg->mqrx_supported ?
3476 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3477 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3478 	if (!ret) {
3479 		counter_u64_add(ic->ic_ierrors, 1);
3480 		return (ret);
3481 	}
3482 
3483 	wh = mtod(m, struct ieee80211_frame *);
3484 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3485 
3486 	IWM_UNLOCK(sc);
3487 
3488 	NET_EPOCH_ENTER(et);
3489 	if (ni != NULL) {
3490 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3491 		ieee80211_input_mimo(ni, m);
3492 		ieee80211_free_node(ni);
3493 	} else {
3494 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3495 		ieee80211_input_mimo_all(ic, m);
3496 	}
3497 	NET_EPOCH_EXIT(et);
3498 
3499 	IWM_LOCK(sc);
3500 
3501 	return true;
3502 }
3503 
3504 static int
3505 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3506 	struct iwm_node *in)
3507 {
3508 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3509 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3510 	struct ieee80211_node *ni = &in->in_ni;
3511 	struct ieee80211vap *vap = ni->ni_vap;
3512 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3513 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3514 	boolean_t rate_matched;
3515 	uint8_t tx_resp_rate;
3516 
3517 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3518 
3519 	/* Update rate control statistics. */
3520 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3521 	    __func__,
3522 	    (int) le16toh(tx_resp->status.status),
3523 	    (int) le16toh(tx_resp->status.sequence),
3524 	    tx_resp->frame_count,
3525 	    tx_resp->bt_kill_count,
3526 	    tx_resp->failure_rts,
3527 	    tx_resp->failure_frame,
3528 	    le32toh(tx_resp->initial_rate),
3529 	    (int) le16toh(tx_resp->wireless_media_time));
3530 
3531 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3532 
3533 	/* For rate control, ignore frames sent at different initial rate */
3534 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3535 
3536 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3537 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3538 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3539 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3540 	}
3541 
3542 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3543 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3544 	txs->short_retries = tx_resp->failure_rts;
3545 	txs->long_retries = tx_resp->failure_frame;
3546 	if (status != IWM_TX_STATUS_SUCCESS &&
3547 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3548 		switch (status) {
3549 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3550 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3551 			break;
3552 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3553 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3554 			break;
3555 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3556 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3557 			break;
3558 		default:
3559 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3560 			break;
3561 		}
3562 	} else {
3563 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3564 	}
3565 
3566 	if (rate_matched) {
3567 		ieee80211_ratectl_tx_complete(ni, txs);
3568 
3569 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3570 		new_rate = vap->iv_bss->ni_txrate;
3571 		if (new_rate != 0 && new_rate != cur_rate) {
3572 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3573 			iwm_setrates(sc, in, rix);
3574 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3575 		}
3576  	}
3577 
3578 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3579 }
3580 
3581 static void
3582 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3583 {
3584 	struct iwm_cmd_header *cmd_hdr;
3585 	struct iwm_tx_ring *ring;
3586 	struct iwm_tx_data *txd;
3587 	struct iwm_node *in;
3588 	struct mbuf *m;
3589 	int idx, qid, qmsk, status;
3590 
3591 	cmd_hdr = &pkt->hdr;
3592 	idx = cmd_hdr->idx;
3593 	qid = cmd_hdr->qid;
3594 
3595 	ring = &sc->txq[qid];
3596 	txd = &ring->data[idx];
3597 	in = txd->in;
3598 	m = txd->m;
3599 
3600 	KASSERT(txd->done == 0, ("txd not done"));
3601 	KASSERT(txd->in != NULL, ("txd without node"));
3602 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3603 
3604 	sc->sc_tx_timer = 0;
3605 
3606 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3607 
3608 	/* Unmap and free mbuf. */
3609 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3610 	bus_dmamap_unload(ring->data_dmat, txd->map);
3611 
3612 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3613 	    "free txd %p, in %p\n", txd, txd->in);
3614 	txd->done = 1;
3615 	txd->m = NULL;
3616 	txd->in = NULL;
3617 
3618 	ieee80211_tx_complete(&in->in_ni, m, status);
3619 
3620 	qmsk = 1 << qid;
3621 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3622 		sc->qfullmsk &= ~qmsk;
3623 		if (sc->qfullmsk == 0)
3624 			iwm_start(sc);
3625 	}
3626 }
3627 
3628 /*
3629  * transmit side
3630  */
3631 
3632 /*
3633  * Process a "command done" firmware notification.  This is where we wakeup
3634  * processes waiting for a synchronous command completion.
3635  * from if_iwn
3636  */
3637 static void
3638 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3639 {
3640 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3641 	struct iwm_tx_data *data;
3642 
3643 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3644 		return;	/* Not a command ack. */
3645 	}
3646 
3647 	/* XXX wide commands? */
3648 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3649 	    "cmd notification type 0x%x qid %d idx %d\n",
3650 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3651 
3652 	data = &ring->data[pkt->hdr.idx];
3653 
3654 	/* If the command was mapped in an mbuf, free it. */
3655 	if (data->m != NULL) {
3656 		bus_dmamap_sync(ring->data_dmat, data->map,
3657 		    BUS_DMASYNC_POSTWRITE);
3658 		bus_dmamap_unload(ring->data_dmat, data->map);
3659 		m_freem(data->m);
3660 		data->m = NULL;
3661 	}
3662 	wakeup(&ring->desc[pkt->hdr.idx]);
3663 
3664 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3665 		device_printf(sc->sc_dev,
3666 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3667 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3668 		/* XXX call iwm_force_nmi() */
3669 	}
3670 
3671 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3672 	ring->queued--;
3673 	if (ring->queued == 0)
3674 		iwm_pcie_clear_cmd_in_flight(sc);
3675 }
3676 
3677 #if 0
3678 /*
3679  * necessary only for block ack mode
3680  */
3681 void
3682 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3683 	uint16_t len)
3684 {
3685 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3686 	uint16_t w_val;
3687 
3688 	scd_bc_tbl = sc->sched_dma.vaddr;
3689 
3690 	len += 8; /* magic numbers came naturally from paris */
3691 	len = roundup(len, 4) / 4;
3692 
3693 	w_val = htole16(sta_id << 12 | len);
3694 
3695 	/* Update TX scheduler. */
3696 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3697 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3698 	    BUS_DMASYNC_PREWRITE);
3699 
3700 	/* I really wonder what this is ?!? */
3701 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3702 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3703 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3704 		    BUS_DMASYNC_PREWRITE);
3705 	}
3706 }
3707 #endif
3708 
3709 static int
3710 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3711 {
3712 	int i;
3713 
3714 	for (i = 0; i < nitems(iwm_rates); i++) {
3715 		if (iwm_rates[i].rate == rate)
3716 			return (i);
3717 	}
3718 	/* XXX error? */
3719 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3720 	    "%s: couldn't find an entry for rate=%d\n",
3721 	    __func__,
3722 	    rate);
3723 	return (0);
3724 }
3725 
3726 /*
3727  * Fill in the rate related information for a transmit command.
3728  */
3729 static const struct iwm_rate *
3730 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3731 	struct mbuf *m, struct iwm_tx_cmd *tx)
3732 {
3733 	struct ieee80211_node *ni = &in->in_ni;
3734 	struct ieee80211_frame *wh;
3735 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3736 	const struct iwm_rate *rinfo;
3737 	int type;
3738 	int ridx, rate_flags;
3739 
3740 	wh = mtod(m, struct ieee80211_frame *);
3741 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3742 
3743 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3744 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3745 
3746 	if (type == IEEE80211_FC0_TYPE_MGT ||
3747 	    type == IEEE80211_FC0_TYPE_CTL ||
3748 	    (m->m_flags & M_EAPOL) != 0) {
3749 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3750 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3751 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3752 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3753 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3754 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3755 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3756 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3757 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3758 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3759 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3760 	} else {
3761 		/* for data frames, use RS table */
3762 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3763 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3764 		if (ridx == -1)
3765 			ridx = 0;
3766 
3767 		/* This is the index into the programmed table */
3768 		tx->initial_rate_index = 0;
3769 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3770 	}
3771 
3772 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3773 	    "%s: frame type=%d txrate %d\n",
3774 	        __func__, type, iwm_rates[ridx].rate);
3775 
3776 	rinfo = &iwm_rates[ridx];
3777 
3778 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3779 	    __func__, ridx,
3780 	    rinfo->rate,
3781 	    !! (IWM_RIDX_IS_CCK(ridx))
3782 	    );
3783 
3784 	/* XXX TODO: hard-coded TX antenna? */
3785 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3786 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3787 	else
3788 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3789 	if (IWM_RIDX_IS_CCK(ridx))
3790 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3791 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3792 
3793 	return rinfo;
3794 }
3795 
3796 #define TB0_SIZE 16
3797 static int
3798 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3799 {
3800 	struct ieee80211com *ic = &sc->sc_ic;
3801 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3802 	struct iwm_node *in = IWM_NODE(ni);
3803 	struct iwm_tx_ring *ring;
3804 	struct iwm_tx_data *data;
3805 	struct iwm_tfd *desc;
3806 	struct iwm_device_cmd *cmd;
3807 	struct iwm_tx_cmd *tx;
3808 	struct ieee80211_frame *wh;
3809 	struct ieee80211_key *k = NULL;
3810 	struct mbuf *m1;
3811 	const struct iwm_rate *rinfo;
3812 	uint32_t flags;
3813 	u_int hdrlen;
3814 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3815 	int nsegs;
3816 	uint8_t tid, type;
3817 	int i, totlen, error, pad;
3818 
3819 	wh = mtod(m, struct ieee80211_frame *);
3820 	hdrlen = ieee80211_anyhdrsize(wh);
3821 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3822 	tid = 0;
3823 	ring = &sc->txq[ac];
3824 	desc = &ring->desc[ring->cur];
3825 	data = &ring->data[ring->cur];
3826 
3827 	/* Fill out iwm_tx_cmd to send to the firmware */
3828 	cmd = &ring->cmd[ring->cur];
3829 	cmd->hdr.code = IWM_TX_CMD;
3830 	cmd->hdr.flags = 0;
3831 	cmd->hdr.qid = ring->qid;
3832 	cmd->hdr.idx = ring->cur;
3833 
3834 	tx = (void *)cmd->data;
3835 	memset(tx, 0, sizeof(*tx));
3836 
3837 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3838 
3839 	/* Encrypt the frame if need be. */
3840 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3841 		/* Retrieve key for TX && do software encryption. */
3842 		k = ieee80211_crypto_encap(ni, m);
3843 		if (k == NULL) {
3844 			m_freem(m);
3845 			return (ENOBUFS);
3846 		}
3847 		/* 802.11 header may have moved. */
3848 		wh = mtod(m, struct ieee80211_frame *);
3849 	}
3850 
3851 	if (ieee80211_radiotap_active_vap(vap)) {
3852 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3853 
3854 		tap->wt_flags = 0;
3855 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3856 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3857 		tap->wt_rate = rinfo->rate;
3858 		if (k != NULL)
3859 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3860 		ieee80211_radiotap_tx(vap, m);
3861 	}
3862 
3863 	flags = 0;
3864 	totlen = m->m_pkthdr.len;
3865 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3866 		flags |= IWM_TX_CMD_FLG_ACK;
3867 	}
3868 
3869 	if (type == IEEE80211_FC0_TYPE_DATA &&
3870 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3871 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3872 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3873 	}
3874 
3875 	tx->sta_id = IWM_STATION_ID;
3876 
3877 	if (type == IEEE80211_FC0_TYPE_MGT) {
3878 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3879 
3880 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3881 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3882 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3883 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3884 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3885 		} else {
3886 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3887 		}
3888 	} else {
3889 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3890 	}
3891 
3892 	if (hdrlen & 3) {
3893 		/* First segment length must be a multiple of 4. */
3894 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3895 		tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
3896 		pad = 4 - (hdrlen & 3);
3897 	} else {
3898 		tx->offload_assist = 0;
3899 		pad = 0;
3900 	}
3901 
3902 	tx->len = htole16(totlen);
3903 	tx->tid_tspec = tid;
3904 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3905 
3906 	/* Set physical address of "scratch area". */
3907 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3908 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3909 
3910 	/* Copy 802.11 header in TX command. */
3911 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3912 
3913 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3914 
3915 	tx->sec_ctl = 0;
3916 	tx->tx_flags |= htole32(flags);
3917 
3918 	/* Trim 802.11 header. */
3919 	m_adj(m, hdrlen);
3920 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3921 	    segs, &nsegs, BUS_DMA_NOWAIT);
3922 	if (error != 0) {
3923 		if (error != EFBIG) {
3924 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3925 			    error);
3926 			m_freem(m);
3927 			return error;
3928 		}
3929 		/* Too many DMA segments, linearize mbuf. */
3930 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3931 		if (m1 == NULL) {
3932 			device_printf(sc->sc_dev,
3933 			    "%s: could not defrag mbuf\n", __func__);
3934 			m_freem(m);
3935 			return (ENOBUFS);
3936 		}
3937 		m = m1;
3938 
3939 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3940 		    segs, &nsegs, BUS_DMA_NOWAIT);
3941 		if (error != 0) {
3942 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3943 			    error);
3944 			m_freem(m);
3945 			return error;
3946 		}
3947 	}
3948 	data->m = m;
3949 	data->in = in;
3950 	data->done = 0;
3951 
3952 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3953 	    "sending txd %p, in %p\n", data, data->in);
3954 	KASSERT(data->in != NULL, ("node is NULL"));
3955 
3956 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3957 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3958 	    ring->qid, ring->cur, totlen, nsegs,
3959 	    le32toh(tx->tx_flags),
3960 	    le32toh(tx->rate_n_flags),
3961 	    tx->initial_rate_index
3962 	    );
3963 
3964 	/* Fill TX descriptor. */
3965 	memset(desc, 0, sizeof(*desc));
3966 	desc->num_tbs = 2 + nsegs;
3967 
3968 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3969 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3970 	    (TB0_SIZE << 4));
3971 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3972 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3973 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3974 	    hdrlen + pad - TB0_SIZE) << 4));
3975 
3976 	/* Other DMA segments are for data payload. */
3977 	for (i = 0; i < nsegs; i++) {
3978 		seg = &segs[i];
3979 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3980 		desc->tbs[i + 2].hi_n_len =
3981 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3982 		    (seg->ds_len << 4);
3983 	}
3984 
3985 	bus_dmamap_sync(ring->data_dmat, data->map,
3986 	    BUS_DMASYNC_PREWRITE);
3987 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3988 	    BUS_DMASYNC_PREWRITE);
3989 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3990 	    BUS_DMASYNC_PREWRITE);
3991 
3992 #if 0
3993 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3994 #endif
3995 
3996 	/* Kick TX ring. */
3997 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3998 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3999 
4000 	/* Mark TX ring as full if we reach a certain threshold. */
4001 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4002 		sc->qfullmsk |= 1 << ring->qid;
4003 	}
4004 
4005 	return 0;
4006 }
4007 
4008 static int
4009 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4010     const struct ieee80211_bpf_params *params)
4011 {
4012 	struct ieee80211com *ic = ni->ni_ic;
4013 	struct iwm_softc *sc = ic->ic_softc;
4014 	int error = 0;
4015 
4016 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4017 	    "->%s begin\n", __func__);
4018 
4019 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4020 		m_freem(m);
4021 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4022 		    "<-%s not RUNNING\n", __func__);
4023 		return (ENETDOWN);
4024         }
4025 
4026 	IWM_LOCK(sc);
4027 	/* XXX fix this */
4028         if (params == NULL) {
4029 		error = iwm_tx(sc, m, ni, 0);
4030 	} else {
4031 		error = iwm_tx(sc, m, ni, 0);
4032 	}
4033 	if (sc->sc_tx_timer == 0)
4034 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4035 	sc->sc_tx_timer = 5;
4036 	IWM_UNLOCK(sc);
4037 
4038         return (error);
4039 }
4040 
4041 /*
4042  * mvm/tx.c
4043  */
4044 
4045 /*
4046  * Note that there are transports that buffer frames before they reach
4047  * the firmware. This means that after flush_tx_path is called, the
4048  * queue might not be empty. The race-free way to handle this is to:
4049  * 1) set the station as draining
4050  * 2) flush the Tx path
4051  * 3) wait for the transport queues to be empty
4052  */
4053 int
4054 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4055 {
4056 	int ret;
4057 	struct iwm_tx_path_flush_cmd flush_cmd = {
4058 		.queues_ctl = htole32(tfd_msk),
4059 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4060 	};
4061 
4062 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4063 	    sizeof(flush_cmd), &flush_cmd);
4064 	if (ret)
4065                 device_printf(sc->sc_dev,
4066 		    "Flushing tx queue failed: %d\n", ret);
4067 	return ret;
4068 }
4069 
4070 /*
4071  * BEGIN mvm/quota.c
4072  */
4073 
4074 static int
4075 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4076 {
4077 	struct iwm_time_quota_cmd cmd;
4078 	int i, idx, ret, num_active_macs, quota, quota_rem;
4079 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4080 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4081 	uint16_t id;
4082 
4083 	memset(&cmd, 0, sizeof(cmd));
4084 
4085 	/* currently, PHY ID == binding ID */
4086 	if (ivp) {
4087 		id = ivp->phy_ctxt->id;
4088 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4089 		colors[id] = ivp->phy_ctxt->color;
4090 
4091 		if (1)
4092 			n_ifs[id] = 1;
4093 	}
4094 
4095 	/*
4096 	 * The FW's scheduling session consists of
4097 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4098 	 * equally between all the bindings that require quota
4099 	 */
4100 	num_active_macs = 0;
4101 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4102 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4103 		num_active_macs += n_ifs[i];
4104 	}
4105 
4106 	quota = 0;
4107 	quota_rem = 0;
4108 	if (num_active_macs) {
4109 		quota = IWM_MAX_QUOTA / num_active_macs;
4110 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4111 	}
4112 
4113 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4114 		if (colors[i] < 0)
4115 			continue;
4116 
4117 		cmd.quotas[idx].id_and_color =
4118 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4119 
4120 		if (n_ifs[i] <= 0) {
4121 			cmd.quotas[idx].quota = htole32(0);
4122 			cmd.quotas[idx].max_duration = htole32(0);
4123 		} else {
4124 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4125 			cmd.quotas[idx].max_duration = htole32(0);
4126 		}
4127 		idx++;
4128 	}
4129 
4130 	/* Give the remainder of the session to the first binding */
4131 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4132 
4133 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4134 	    sizeof(cmd), &cmd);
4135 	if (ret)
4136 		device_printf(sc->sc_dev,
4137 		    "%s: Failed to send quota: %d\n", __func__, ret);
4138 	return ret;
4139 }
4140 
4141 /*
4142  * END mvm/quota.c
4143  */
4144 
4145 /*
4146  * ieee80211 routines
4147  */
4148 
4149 /*
4150  * Change to AUTH state in 80211 state machine.  Roughly matches what
4151  * Linux does in bss_info_changed().
4152  */
4153 static int
4154 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4155 {
4156 	struct ieee80211_node *ni;
4157 	struct iwm_node *in;
4158 	struct iwm_vap *iv = IWM_VAP(vap);
4159 	uint32_t duration;
4160 	int error;
4161 
4162 	/*
4163 	 * XXX i have a feeling that the vap node is being
4164 	 * freed from underneath us. Grr.
4165 	 */
4166 	ni = ieee80211_ref_node(vap->iv_bss);
4167 	in = IWM_NODE(ni);
4168 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4169 	    "%s: called; vap=%p, bss ni=%p\n",
4170 	    __func__,
4171 	    vap,
4172 	    ni);
4173 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4174 	    __func__, ether_sprintf(ni->ni_bssid));
4175 
4176 	in->in_assoc = 0;
4177 	iv->iv_auth = 1;
4178 
4179 	/*
4180 	 * Firmware bug - it'll crash if the beacon interval is less
4181 	 * than 16. We can't avoid connecting at all, so refuse the
4182 	 * station state change, this will cause net80211 to abandon
4183 	 * attempts to connect to this AP, and eventually wpa_s will
4184 	 * blacklist the AP...
4185 	 */
4186 	if (ni->ni_intval < 16) {
4187 		device_printf(sc->sc_dev,
4188 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4189 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4190 		error = EINVAL;
4191 		goto out;
4192 	}
4193 
4194 	error = iwm_allow_mcast(vap, sc);
4195 	if (error) {
4196 		device_printf(sc->sc_dev,
4197 		    "%s: failed to set multicast\n", __func__);
4198 		goto out;
4199 	}
4200 
4201 	/*
4202 	 * This is where it deviates from what Linux does.
4203 	 *
4204 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4205 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4206 	 * and always does a mac_ctx_changed().
4207 	 *
4208 	 * The openbsd port doesn't attempt to do that - it reset things
4209 	 * at odd states and does the add here.
4210 	 *
4211 	 * So, until the state handling is fixed (ie, we never reset
4212 	 * the NIC except for a firmware failure, which should drag
4213 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4214 	 * contexts that are required), let's do a dirty hack here.
4215 	 */
4216 	if (iv->is_uploaded) {
4217 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4218 			device_printf(sc->sc_dev,
4219 			    "%s: failed to update MAC\n", __func__);
4220 			goto out;
4221 		}
4222 	} else {
4223 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4224 			device_printf(sc->sc_dev,
4225 			    "%s: failed to add MAC\n", __func__);
4226 			goto out;
4227 		}
4228 	}
4229 	sc->sc_firmware_state = 1;
4230 
4231 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4232 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4233 		device_printf(sc->sc_dev,
4234 		    "%s: failed update phy ctxt\n", __func__);
4235 		goto out;
4236 	}
4237 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4238 
4239 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4240 		device_printf(sc->sc_dev,
4241 		    "%s: binding update cmd\n", __func__);
4242 		goto out;
4243 	}
4244 	sc->sc_firmware_state = 2;
4245 	/*
4246 	 * Authentication becomes unreliable when powersaving is left enabled
4247 	 * here. Powersaving will be activated again when association has
4248 	 * finished or is aborted.
4249 	 */
4250 	iv->ps_disabled = TRUE;
4251 	error = iwm_power_update_mac(sc);
4252 	iv->ps_disabled = FALSE;
4253 	if (error != 0) {
4254 		device_printf(sc->sc_dev,
4255 		    "%s: failed to update power management\n",
4256 		    __func__);
4257 		goto out;
4258 	}
4259 	if ((error = iwm_add_sta(sc, in)) != 0) {
4260 		device_printf(sc->sc_dev,
4261 		    "%s: failed to add sta\n", __func__);
4262 		goto out;
4263 	}
4264 	sc->sc_firmware_state = 3;
4265 
4266 	/*
4267 	 * Prevent the FW from wandering off channel during association
4268 	 * by "protecting" the session with a time event.
4269 	 */
4270 	/* XXX duration is in units of TU, not MS */
4271 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4272 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4273 
4274 	error = 0;
4275 out:
4276 	if (error != 0)
4277 		iv->iv_auth = 0;
4278 	ieee80211_free_node(ni);
4279 	return (error);
4280 }
4281 
4282 static struct ieee80211_node *
4283 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4284 {
4285 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4286 	    M_NOWAIT | M_ZERO);
4287 }
4288 
4289 static uint8_t
4290 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4291 {
4292 	uint8_t plcp = rate_n_flags & 0xff;
4293 	int i;
4294 
4295 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4296 		if (iwm_rates[i].plcp == plcp)
4297 			return iwm_rates[i].rate;
4298 	}
4299 	return 0;
4300 }
4301 
4302 uint8_t
4303 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4304 {
4305 	int i;
4306 	uint8_t rval;
4307 
4308 	for (i = 0; i < rs->rs_nrates; i++) {
4309 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4310 		if (rval == iwm_rates[ridx].rate)
4311 			return rs->rs_rates[i];
4312 	}
4313 
4314 	return 0;
4315 }
4316 
4317 static int
4318 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4319 {
4320 	int i;
4321 
4322 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4323 		if (iwm_rates[i].rate == rate)
4324 			return i;
4325 	}
4326 
4327 	device_printf(sc->sc_dev,
4328 	    "%s: WARNING: device rate for %u not found!\n",
4329 	    __func__, rate);
4330 
4331 	return -1;
4332 }
4333 
4334 
4335 static void
4336 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4337 {
4338 	struct ieee80211_node *ni = &in->in_ni;
4339 	struct iwm_lq_cmd *lq = &in->in_lq;
4340 	struct ieee80211_rateset *rs = &ni->ni_rates;
4341 	int nrates = rs->rs_nrates;
4342 	int i, ridx, tab = 0;
4343 //	int txant = 0;
4344 
4345 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4346 
4347 	if (nrates > nitems(lq->rs_table)) {
4348 		device_printf(sc->sc_dev,
4349 		    "%s: node supports %d rates, driver handles "
4350 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4351 		return;
4352 	}
4353 	if (nrates == 0) {
4354 		device_printf(sc->sc_dev,
4355 		    "%s: node supports 0 rates, odd!\n", __func__);
4356 		return;
4357 	}
4358 	nrates = imin(rix + 1, nrates);
4359 
4360 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4361 	    "%s: nrates=%d\n", __func__, nrates);
4362 
4363 	/* then construct a lq_cmd based on those */
4364 	memset(lq, 0, sizeof(*lq));
4365 	lq->sta_id = IWM_STATION_ID;
4366 
4367 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4368 	if (ni->ni_flags & IEEE80211_NODE_HT)
4369 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4370 
4371 	/*
4372 	 * are these used? (we don't do SISO or MIMO)
4373 	 * need to set them to non-zero, though, or we get an error.
4374 	 */
4375 	lq->single_stream_ant_msk = 1;
4376 	lq->dual_stream_ant_msk = 1;
4377 
4378 	/*
4379 	 * Build the actual rate selection table.
4380 	 * The lowest bits are the rates.  Additionally,
4381 	 * CCK needs bit 9 to be set.  The rest of the bits
4382 	 * we add to the table select the tx antenna
4383 	 * Note that we add the rates in the highest rate first
4384 	 * (opposite of ni_rates).
4385 	 */
4386 	for (i = 0; i < nrates; i++) {
4387 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4388 		int nextant;
4389 
4390 		/* Map 802.11 rate to HW rate index. */
4391 		ridx = iwm_rate2ridx(sc, rate);
4392 		if (ridx == -1)
4393 			continue;
4394 
4395 #if 0
4396 		if (txant == 0)
4397 			txant = iwm_get_valid_tx_ant(sc);
4398 		nextant = 1<<(ffs(txant)-1);
4399 		txant &= ~nextant;
4400 #else
4401 		nextant = iwm_get_valid_tx_ant(sc);
4402 #endif
4403 		tab = iwm_rates[ridx].plcp;
4404 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4405 		if (IWM_RIDX_IS_CCK(ridx))
4406 			tab |= IWM_RATE_MCS_CCK_MSK;
4407 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4408 		    "station rate i=%d, rate=%d, hw=%x\n",
4409 		    i, iwm_rates[ridx].rate, tab);
4410 		lq->rs_table[i] = htole32(tab);
4411 	}
4412 	/* then fill the rest with the lowest possible rate */
4413 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4414 		KASSERT(tab != 0, ("invalid tab"));
4415 		lq->rs_table[i] = htole32(tab);
4416 	}
4417 }
4418 
4419 static void
4420 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4421 {
4422 	struct iwm_vap *ivp = IWM_VAP(vap);
4423 	int error;
4424 
4425 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4426 	sc->sc_tx_timer = 0;
4427 
4428 	ivp->iv_auth = 0;
4429 	if (sc->sc_firmware_state == 3) {
4430 		iwm_xmit_queue_drain(sc);
4431 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4432 		error = iwm_rm_sta(sc, vap, TRUE);
4433 		if (error) {
4434 			device_printf(sc->sc_dev,
4435 			    "%s: Failed to remove station: %d\n",
4436 			    __func__, error);
4437 		}
4438 	}
4439 	if (sc->sc_firmware_state == 3) {
4440 		error = iwm_mac_ctxt_changed(sc, vap);
4441 		if (error) {
4442 			device_printf(sc->sc_dev,
4443 			    "%s: Failed to change mac context: %d\n",
4444 			    __func__, error);
4445 		}
4446 	}
4447 	if (sc->sc_firmware_state == 3) {
4448 		error = iwm_sf_update(sc, vap, FALSE);
4449 		if (error) {
4450 			device_printf(sc->sc_dev,
4451 			    "%s: Failed to update smart FIFO: %d\n",
4452 			    __func__, error);
4453 		}
4454 	}
4455 	if (sc->sc_firmware_state == 3) {
4456 		error = iwm_rm_sta_id(sc, vap);
4457 		if (error) {
4458 			device_printf(sc->sc_dev,
4459 			    "%s: Failed to remove station id: %d\n",
4460 			    __func__, error);
4461 		}
4462 	}
4463 	if (sc->sc_firmware_state == 3) {
4464 		error = iwm_update_quotas(sc, NULL);
4465 		if (error) {
4466 			device_printf(sc->sc_dev,
4467 			    "%s: Failed to update PHY quota: %d\n",
4468 			    __func__, error);
4469 		}
4470 	}
4471 	if (sc->sc_firmware_state == 3) {
4472 		/* XXX Might need to specify bssid correctly. */
4473 		error = iwm_mac_ctxt_changed(sc, vap);
4474 		if (error) {
4475 			device_printf(sc->sc_dev,
4476 			    "%s: Failed to change mac context: %d\n",
4477 			    __func__, error);
4478 		}
4479 	}
4480 	if (sc->sc_firmware_state == 3) {
4481 		sc->sc_firmware_state = 2;
4482 	}
4483 	if (sc->sc_firmware_state > 1) {
4484 		error = iwm_binding_remove_vif(sc, ivp);
4485 		if (error) {
4486 			device_printf(sc->sc_dev,
4487 			    "%s: Failed to remove channel ctx: %d\n",
4488 			    __func__, error);
4489 		}
4490 	}
4491 	if (sc->sc_firmware_state > 1) {
4492 		sc->sc_firmware_state = 1;
4493 	}
4494 	ivp->phy_ctxt = NULL;
4495 	if (sc->sc_firmware_state > 0) {
4496 		error = iwm_mac_ctxt_changed(sc, vap);
4497 		if (error) {
4498 			device_printf(sc->sc_dev,
4499 			    "%s: Failed to change mac context: %d\n",
4500 			    __func__, error);
4501 		}
4502 	}
4503 	if (sc->sc_firmware_state > 0) {
4504 		error = iwm_power_update_mac(sc);
4505 		if (error != 0) {
4506 			device_printf(sc->sc_dev,
4507 			    "%s: failed to update power management\n",
4508 			    __func__);
4509 		}
4510 	}
4511 	sc->sc_firmware_state = 0;
4512 }
4513 
4514 static int
4515 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4516 {
4517 	struct iwm_vap *ivp = IWM_VAP(vap);
4518 	struct ieee80211com *ic = vap->iv_ic;
4519 	struct iwm_softc *sc = ic->ic_softc;
4520 	struct iwm_node *in;
4521 	int error;
4522 
4523 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4524 	    "switching state %s -> %s arg=0x%x\n",
4525 	    ieee80211_state_name[vap->iv_state],
4526 	    ieee80211_state_name[nstate],
4527 	    arg);
4528 
4529 	IEEE80211_UNLOCK(ic);
4530 	IWM_LOCK(sc);
4531 
4532 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4533 	    (nstate == IEEE80211_S_AUTH ||
4534 	     nstate == IEEE80211_S_ASSOC ||
4535 	     nstate == IEEE80211_S_RUN)) {
4536 		/* Stop blinking for a scan, when authenticating. */
4537 		iwm_led_blink_stop(sc);
4538 	}
4539 
4540 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4541 		iwm_led_disable(sc);
4542 		/* disable beacon filtering if we're hopping out of RUN */
4543 		iwm_disable_beacon_filter(sc);
4544 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4545 			in->in_assoc = 0;
4546 	}
4547 
4548 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4549 	     vap->iv_state == IEEE80211_S_ASSOC ||
4550 	     vap->iv_state == IEEE80211_S_RUN) &&
4551 	    (nstate == IEEE80211_S_INIT ||
4552 	     nstate == IEEE80211_S_SCAN ||
4553 	     nstate == IEEE80211_S_AUTH)) {
4554 		iwm_stop_session_protection(sc, ivp);
4555 	}
4556 
4557 	if ((vap->iv_state == IEEE80211_S_RUN ||
4558 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4559 	    nstate == IEEE80211_S_INIT) {
4560 		/*
4561 		 * In this case, iv_newstate() wants to send an 80211 frame on
4562 		 * the network that we are leaving. So we need to call it,
4563 		 * before tearing down all the firmware state.
4564 		 */
4565 		IWM_UNLOCK(sc);
4566 		IEEE80211_LOCK(ic);
4567 		ivp->iv_newstate(vap, nstate, arg);
4568 		IEEE80211_UNLOCK(ic);
4569 		IWM_LOCK(sc);
4570 		iwm_bring_down_firmware(sc, vap);
4571 		IWM_UNLOCK(sc);
4572 		IEEE80211_LOCK(ic);
4573 		return 0;
4574 	}
4575 
4576 	switch (nstate) {
4577 	case IEEE80211_S_INIT:
4578 	case IEEE80211_S_SCAN:
4579 		break;
4580 
4581 	case IEEE80211_S_AUTH:
4582 		iwm_bring_down_firmware(sc, vap);
4583 		if ((error = iwm_auth(vap, sc)) != 0) {
4584 			device_printf(sc->sc_dev,
4585 			    "%s: could not move to auth state: %d\n",
4586 			    __func__, error);
4587 			iwm_bring_down_firmware(sc, vap);
4588 			IWM_UNLOCK(sc);
4589 			IEEE80211_LOCK(ic);
4590 			return 1;
4591 		}
4592 		break;
4593 
4594 	case IEEE80211_S_ASSOC:
4595 		/*
4596 		 * EBS may be disabled due to previous failures reported by FW.
4597 		 * Reset EBS status here assuming environment has been changed.
4598 		 */
4599 		sc->last_ebs_successful = TRUE;
4600 		break;
4601 
4602 	case IEEE80211_S_RUN:
4603 		in = IWM_NODE(vap->iv_bss);
4604 		/* Update the association state, now we have it all */
4605 		/* (eg associd comes in at this point */
4606 		error = iwm_update_sta(sc, in);
4607 		if (error != 0) {
4608 			device_printf(sc->sc_dev,
4609 			    "%s: failed to update STA\n", __func__);
4610 			IWM_UNLOCK(sc);
4611 			IEEE80211_LOCK(ic);
4612 			return error;
4613 		}
4614 		in->in_assoc = 1;
4615 		error = iwm_mac_ctxt_changed(sc, vap);
4616 		if (error != 0) {
4617 			device_printf(sc->sc_dev,
4618 			    "%s: failed to update MAC: %d\n", __func__, error);
4619 		}
4620 
4621 		iwm_sf_update(sc, vap, FALSE);
4622 		iwm_enable_beacon_filter(sc, ivp);
4623 		iwm_power_update_mac(sc);
4624 		iwm_update_quotas(sc, ivp);
4625 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4626 		iwm_setrates(sc, in, rix);
4627 
4628 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4629 			device_printf(sc->sc_dev,
4630 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4631 		}
4632 
4633 		iwm_led_enable(sc);
4634 		break;
4635 
4636 	default:
4637 		break;
4638 	}
4639 	IWM_UNLOCK(sc);
4640 	IEEE80211_LOCK(ic);
4641 
4642 	return (ivp->iv_newstate(vap, nstate, arg));
4643 }
4644 
4645 void
4646 iwm_endscan_cb(void *arg, int pending)
4647 {
4648 	struct iwm_softc *sc = arg;
4649 	struct ieee80211com *ic = &sc->sc_ic;
4650 
4651 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4652 	    "%s: scan ended\n",
4653 	    __func__);
4654 
4655 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4656 }
4657 
4658 static int
4659 iwm_send_bt_init_conf(struct iwm_softc *sc)
4660 {
4661 	struct iwm_bt_coex_cmd bt_cmd;
4662 
4663 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4664 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4665 
4666 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4667 	    &bt_cmd);
4668 }
4669 
4670 static boolean_t
4671 iwm_is_lar_supported(struct iwm_softc *sc)
4672 {
4673 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4674 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4675 
4676 	if (iwm_lar_disable)
4677 		return FALSE;
4678 
4679 	/*
4680 	 * Enable LAR only if it is supported by the FW (TLV) &&
4681 	 * enabled in the NVM
4682 	 */
4683 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4684 		return nvm_lar && tlv_lar;
4685 	else
4686 		return tlv_lar;
4687 }
4688 
4689 static boolean_t
4690 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4691 {
4692 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4693 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4694 }
4695 
4696 static int
4697 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4698 {
4699 	struct iwm_mcc_update_cmd mcc_cmd;
4700 	struct iwm_host_cmd hcmd = {
4701 		.id = IWM_MCC_UPDATE_CMD,
4702 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4703 		.data = { &mcc_cmd },
4704 	};
4705 	int ret;
4706 #ifdef IWM_DEBUG
4707 	struct iwm_rx_packet *pkt;
4708 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4709 	struct iwm_mcc_update_resp *mcc_resp;
4710 	int n_channels;
4711 	uint16_t mcc;
4712 #endif
4713 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4714 
4715 	if (!iwm_is_lar_supported(sc)) {
4716 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4717 		    __func__);
4718 		return 0;
4719 	}
4720 
4721 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4722 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4723 	if (iwm_is_wifi_mcc_supported(sc))
4724 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4725 	else
4726 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4727 
4728 	if (resp_v2)
4729 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4730 	else
4731 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4732 
4733 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4734 	    "send MCC update to FW with '%c%c' src = %d\n",
4735 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4736 
4737 	ret = iwm_send_cmd(sc, &hcmd);
4738 	if (ret)
4739 		return ret;
4740 
4741 #ifdef IWM_DEBUG
4742 	pkt = hcmd.resp_pkt;
4743 
4744 	/* Extract MCC response */
4745 	if (resp_v2) {
4746 		mcc_resp = (void *)pkt->data;
4747 		mcc = mcc_resp->mcc;
4748 		n_channels =  le32toh(mcc_resp->n_channels);
4749 	} else {
4750 		mcc_resp_v1 = (void *)pkt->data;
4751 		mcc = mcc_resp_v1->mcc;
4752 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4753 	}
4754 
4755 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4756 	if (mcc == 0)
4757 		mcc = 0x3030;  /* "00" - world */
4758 
4759 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4760 	    "regulatory domain '%c%c' (%d channels available)\n",
4761 	    mcc >> 8, mcc & 0xff, n_channels);
4762 #endif
4763 	iwm_free_resp(sc, &hcmd);
4764 
4765 	return 0;
4766 }
4767 
4768 static void
4769 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4770 {
4771 	struct iwm_host_cmd cmd = {
4772 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4773 		.len = { sizeof(uint32_t), },
4774 		.data = { &backoff, },
4775 	};
4776 
4777 	if (iwm_send_cmd(sc, &cmd) != 0) {
4778 		device_printf(sc->sc_dev,
4779 		    "failed to change thermal tx backoff\n");
4780 	}
4781 }
4782 
4783 static int
4784 iwm_init_hw(struct iwm_softc *sc)
4785 {
4786 	struct ieee80211com *ic = &sc->sc_ic;
4787 	int error, i, ac;
4788 
4789 	sc->sf_state = IWM_SF_UNINIT;
4790 
4791 	if ((error = iwm_start_hw(sc)) != 0) {
4792 		printf("iwm_start_hw: failed %d\n", error);
4793 		return error;
4794 	}
4795 
4796 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4797 		printf("iwm_run_init_ucode: failed %d\n", error);
4798 		return error;
4799 	}
4800 
4801 	/*
4802 	 * should stop and start HW since that INIT
4803 	 * image just loaded
4804 	 */
4805 	iwm_stop_device(sc);
4806 	sc->sc_ps_disabled = FALSE;
4807 	if ((error = iwm_start_hw(sc)) != 0) {
4808 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4809 		return error;
4810 	}
4811 
4812 	/* omstart, this time with the regular firmware */
4813 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4814 	if (error) {
4815 		device_printf(sc->sc_dev, "could not load firmware\n");
4816 		goto error;
4817 	}
4818 
4819 	error = iwm_sf_update(sc, NULL, FALSE);
4820 	if (error)
4821 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4822 
4823 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4824 		device_printf(sc->sc_dev, "bt init conf failed\n");
4825 		goto error;
4826 	}
4827 
4828 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4829 	if (error != 0) {
4830 		device_printf(sc->sc_dev, "antenna config failed\n");
4831 		goto error;
4832 	}
4833 
4834 	/* Send phy db control command and then phy db calibration */
4835 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4836 		goto error;
4837 
4838 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4839 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4840 		goto error;
4841 	}
4842 
4843 	/* Add auxiliary station for scanning */
4844 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4845 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4846 		goto error;
4847 	}
4848 
4849 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4850 		/*
4851 		 * The channel used here isn't relevant as it's
4852 		 * going to be overwritten in the other flows.
4853 		 * For now use the first channel we have.
4854 		 */
4855 		if ((error = iwm_phy_ctxt_add(sc,
4856 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4857 			goto error;
4858 	}
4859 
4860 	/* Initialize tx backoffs to the minimum. */
4861 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4862 		iwm_tt_tx_backoff(sc, 0);
4863 
4864 	if (iwm_config_ltr(sc) != 0)
4865 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4866 
4867 	error = iwm_power_update_device(sc);
4868 	if (error)
4869 		goto error;
4870 
4871 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4872 		goto error;
4873 
4874 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4875 		if ((error = iwm_config_umac_scan(sc)) != 0)
4876 			goto error;
4877 	}
4878 
4879 	/* Enable Tx queues. */
4880 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4881 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4882 		    iwm_ac_to_tx_fifo[ac]);
4883 		if (error)
4884 			goto error;
4885 	}
4886 
4887 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4888 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4889 		goto error;
4890 	}
4891 
4892 	return 0;
4893 
4894  error:
4895 	iwm_stop_device(sc);
4896 	return error;
4897 }
4898 
4899 /* Allow multicast from our BSSID. */
4900 static int
4901 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4902 {
4903 	struct ieee80211_node *ni = vap->iv_bss;
4904 	struct iwm_mcast_filter_cmd *cmd;
4905 	size_t size;
4906 	int error;
4907 
4908 	size = roundup(sizeof(*cmd), 4);
4909 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4910 	if (cmd == NULL)
4911 		return ENOMEM;
4912 	cmd->filter_own = 1;
4913 	cmd->port_id = 0;
4914 	cmd->count = 0;
4915 	cmd->pass_all = 1;
4916 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4917 
4918 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4919 	    IWM_CMD_SYNC, size, cmd);
4920 	free(cmd, M_DEVBUF);
4921 
4922 	return (error);
4923 }
4924 
4925 /*
4926  * ifnet interfaces
4927  */
4928 
4929 static void
4930 iwm_init(struct iwm_softc *sc)
4931 {
4932 	int error;
4933 
4934 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4935 		return;
4936 	}
4937 	sc->sc_generation++;
4938 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4939 
4940 	if ((error = iwm_init_hw(sc)) != 0) {
4941 		printf("iwm_init_hw failed %d\n", error);
4942 		iwm_stop(sc);
4943 		return;
4944 	}
4945 
4946 	/*
4947 	 * Ok, firmware loaded and we are jogging
4948 	 */
4949 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4950 }
4951 
4952 static int
4953 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4954 {
4955 	struct iwm_softc *sc;
4956 	int error;
4957 
4958 	sc = ic->ic_softc;
4959 
4960 	IWM_LOCK(sc);
4961 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4962 		IWM_UNLOCK(sc);
4963 		return (ENXIO);
4964 	}
4965 	error = mbufq_enqueue(&sc->sc_snd, m);
4966 	if (error) {
4967 		IWM_UNLOCK(sc);
4968 		return (error);
4969 	}
4970 	iwm_start(sc);
4971 	IWM_UNLOCK(sc);
4972 	return (0);
4973 }
4974 
4975 /*
4976  * Dequeue packets from sendq and call send.
4977  */
4978 static void
4979 iwm_start(struct iwm_softc *sc)
4980 {
4981 	struct ieee80211_node *ni;
4982 	struct mbuf *m;
4983 	int ac = 0;
4984 
4985 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4986 	while (sc->qfullmsk == 0 &&
4987 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4988 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4989 		if (iwm_tx(sc, m, ni, ac) != 0) {
4990 			if_inc_counter(ni->ni_vap->iv_ifp,
4991 			    IFCOUNTER_OERRORS, 1);
4992 			ieee80211_free_node(ni);
4993 			continue;
4994 		}
4995 		if (sc->sc_tx_timer == 0) {
4996 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4997 			    sc);
4998 		}
4999 		sc->sc_tx_timer = 15;
5000 	}
5001 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5002 }
5003 
5004 static void
5005 iwm_stop(struct iwm_softc *sc)
5006 {
5007 
5008 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5009 	sc->sc_flags |= IWM_FLAG_STOPPED;
5010 	sc->sc_generation++;
5011 	iwm_led_blink_stop(sc);
5012 	sc->sc_tx_timer = 0;
5013 	iwm_stop_device(sc);
5014 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5015 }
5016 
5017 static void
5018 iwm_watchdog(void *arg)
5019 {
5020 	struct iwm_softc *sc = arg;
5021 	struct ieee80211com *ic = &sc->sc_ic;
5022 
5023 	if (sc->sc_attached == 0)
5024 		return;
5025 
5026 	if (sc->sc_tx_timer > 0) {
5027 		if (--sc->sc_tx_timer == 0) {
5028 			device_printf(sc->sc_dev, "device timeout\n");
5029 #ifdef IWM_DEBUG
5030 			iwm_nic_error(sc);
5031 #endif
5032 			ieee80211_restart_all(ic);
5033 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5034 			return;
5035 		}
5036 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5037 	}
5038 }
5039 
5040 static void
5041 iwm_parent(struct ieee80211com *ic)
5042 {
5043 	struct iwm_softc *sc = ic->ic_softc;
5044 	int startall = 0;
5045 	int rfkill = 0;
5046 
5047 	IWM_LOCK(sc);
5048 	if (ic->ic_nrunning > 0) {
5049 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5050 			iwm_init(sc);
5051 			rfkill = iwm_check_rfkill(sc);
5052 			if (!rfkill)
5053 				startall = 1;
5054 		}
5055 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5056 		iwm_stop(sc);
5057 	IWM_UNLOCK(sc);
5058 	if (startall)
5059 		ieee80211_start_all(ic);
5060 	else if (rfkill)
5061 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5062 }
5063 
5064 static void
5065 iwm_rftoggle_task(void *arg, int npending __unused)
5066 {
5067 	struct iwm_softc *sc = arg;
5068 	struct ieee80211com *ic = &sc->sc_ic;
5069 	int rfkill;
5070 
5071 	IWM_LOCK(sc);
5072 	rfkill = iwm_check_rfkill(sc);
5073 	IWM_UNLOCK(sc);
5074 	if (rfkill) {
5075 		device_printf(sc->sc_dev,
5076 		    "%s: rfkill switch, disabling interface\n", __func__);
5077 		ieee80211_suspend_all(ic);
5078 		ieee80211_notify_radio(ic, 0);
5079 	} else {
5080 		device_printf(sc->sc_dev,
5081 		    "%s: rfkill cleared, re-enabling interface\n", __func__);
5082 		ieee80211_resume_all(ic);
5083 		ieee80211_notify_radio(ic, 1);
5084 	}
5085 }
5086 
5087 /*
5088  * The interrupt side of things
5089  */
5090 
5091 /*
5092  * error dumping routines are from iwlwifi/mvm/utils.c
5093  */
5094 
5095 /*
5096  * Note: This structure is read from the device with IO accesses,
5097  * and the reading already does the endian conversion. As it is
5098  * read with uint32_t-sized accesses, any members with a different size
5099  * need to be ordered correctly though!
5100  */
5101 struct iwm_error_event_table {
5102 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5103 	uint32_t error_id;		/* type of error */
5104 	uint32_t trm_hw_status0;	/* TRM HW status */
5105 	uint32_t trm_hw_status1;	/* TRM HW status */
5106 	uint32_t blink2;		/* branch link */
5107 	uint32_t ilink1;		/* interrupt link */
5108 	uint32_t ilink2;		/* interrupt link */
5109 	uint32_t data1;		/* error-specific data */
5110 	uint32_t data2;		/* error-specific data */
5111 	uint32_t data3;		/* error-specific data */
5112 	uint32_t bcon_time;		/* beacon timer */
5113 	uint32_t tsf_low;		/* network timestamp function timer */
5114 	uint32_t tsf_hi;		/* network timestamp function timer */
5115 	uint32_t gp1;		/* GP1 timer register */
5116 	uint32_t gp2;		/* GP2 timer register */
5117 	uint32_t fw_rev_type;	/* firmware revision type */
5118 	uint32_t major;		/* uCode version major */
5119 	uint32_t minor;		/* uCode version minor */
5120 	uint32_t hw_ver;		/* HW Silicon version */
5121 	uint32_t brd_ver;		/* HW board version */
5122 	uint32_t log_pc;		/* log program counter */
5123 	uint32_t frame_ptr;		/* frame pointer */
5124 	uint32_t stack_ptr;		/* stack pointer */
5125 	uint32_t hcmd;		/* last host command header */
5126 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5127 				 * rxtx_flag */
5128 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5129 				 * host_flag */
5130 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5131 				 * enc_flag */
5132 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5133 				 * time_flag */
5134 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5135 				 * wico interrupt */
5136 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5137 	uint32_t wait_event;		/* wait event() caller address */
5138 	uint32_t l2p_control;	/* L2pControlField */
5139 	uint32_t l2p_duration;	/* L2pDurationField */
5140 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5141 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5142 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5143 				 * (LMPM_PMG_SEL) */
5144 	uint32_t u_timestamp;	/* indicate when the date and time of the
5145 				 * compilation */
5146 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5147 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5148 
5149 /*
5150  * UMAC error struct - relevant starting from family 8000 chip.
5151  * Note: This structure is read from the device with IO accesses,
5152  * and the reading already does the endian conversion. As it is
5153  * read with u32-sized accesses, any members with a different size
5154  * need to be ordered correctly though!
5155  */
5156 struct iwm_umac_error_event_table {
5157 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5158 	uint32_t error_id;	/* type of error */
5159 	uint32_t blink1;	/* branch link */
5160 	uint32_t blink2;	/* branch link */
5161 	uint32_t ilink1;	/* interrupt link */
5162 	uint32_t ilink2;	/* interrupt link */
5163 	uint32_t data1;		/* error-specific data */
5164 	uint32_t data2;		/* error-specific data */
5165 	uint32_t data3;		/* error-specific data */
5166 	uint32_t umac_major;
5167 	uint32_t umac_minor;
5168 	uint32_t frame_pointer;	/* core register 27*/
5169 	uint32_t stack_pointer;	/* core register 28 */
5170 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5171 	uint32_t nic_isr_pref;	/* ISR status register */
5172 } __packed;
5173 
5174 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5175 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5176 
5177 #ifdef IWM_DEBUG
5178 struct {
5179 	const char *name;
5180 	uint8_t num;
5181 } advanced_lookup[] = {
5182 	{ "NMI_INTERRUPT_WDG", 0x34 },
5183 	{ "SYSASSERT", 0x35 },
5184 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5185 	{ "BAD_COMMAND", 0x38 },
5186 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5187 	{ "FATAL_ERROR", 0x3D },
5188 	{ "NMI_TRM_HW_ERR", 0x46 },
5189 	{ "NMI_INTERRUPT_TRM", 0x4C },
5190 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5191 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5192 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5193 	{ "NMI_INTERRUPT_HOST", 0x66 },
5194 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5195 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5196 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5197 	{ "ADVANCED_SYSASSERT", 0 },
5198 };
5199 
5200 static const char *
5201 iwm_desc_lookup(uint32_t num)
5202 {
5203 	int i;
5204 
5205 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5206 		if (advanced_lookup[i].num == num)
5207 			return advanced_lookup[i].name;
5208 
5209 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5210 	return advanced_lookup[i].name;
5211 }
5212 
5213 static void
5214 iwm_nic_umac_error(struct iwm_softc *sc)
5215 {
5216 	struct iwm_umac_error_event_table table;
5217 	uint32_t base;
5218 
5219 	base = sc->umac_error_event_table;
5220 
5221 	if (base < 0x800000) {
5222 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5223 		    base);
5224 		return;
5225 	}
5226 
5227 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5228 		device_printf(sc->sc_dev, "reading errlog failed\n");
5229 		return;
5230 	}
5231 
5232 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5233 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5234 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5235 		    sc->sc_flags, table.valid);
5236 	}
5237 
5238 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5239 		iwm_desc_lookup(table.error_id));
5240 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5241 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5242 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5243 	    table.ilink1);
5244 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5245 	    table.ilink2);
5246 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5247 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5248 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5249 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5250 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5251 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5252 	    table.frame_pointer);
5253 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5254 	    table.stack_pointer);
5255 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5256 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5257 	    table.nic_isr_pref);
5258 }
5259 
5260 /*
5261  * Support for dumping the error log seemed like a good idea ...
5262  * but it's mostly hex junk and the only sensible thing is the
5263  * hw/ucode revision (which we know anyway).  Since it's here,
5264  * I'll just leave it in, just in case e.g. the Intel guys want to
5265  * help us decipher some "ADVANCED_SYSASSERT" later.
5266  */
5267 static void
5268 iwm_nic_error(struct iwm_softc *sc)
5269 {
5270 	struct iwm_error_event_table table;
5271 	uint32_t base;
5272 
5273 	device_printf(sc->sc_dev, "dumping device error log\n");
5274 	base = sc->error_event_table[0];
5275 	if (base < 0x800000) {
5276 		device_printf(sc->sc_dev,
5277 		    "Invalid error log pointer 0x%08x\n", base);
5278 		return;
5279 	}
5280 
5281 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5282 		device_printf(sc->sc_dev, "reading errlog failed\n");
5283 		return;
5284 	}
5285 
5286 	if (!table.valid) {
5287 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5288 		return;
5289 	}
5290 
5291 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5292 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5293 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5294 		    sc->sc_flags, table.valid);
5295 	}
5296 
5297 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5298 	    iwm_desc_lookup(table.error_id));
5299 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5300 	    table.trm_hw_status0);
5301 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5302 	    table.trm_hw_status1);
5303 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5304 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5305 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5306 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5307 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5308 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5309 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5310 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5311 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5312 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5313 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5314 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5315 	    table.fw_rev_type);
5316 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5317 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5318 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5319 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5320 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5321 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5322 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5323 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5324 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5325 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5326 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5327 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5328 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5329 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5330 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5331 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5332 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5333 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5334 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5335 
5336 	if (sc->umac_error_event_table)
5337 		iwm_nic_umac_error(sc);
5338 }
5339 #endif
5340 
5341 static void
5342 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5343 {
5344 	struct ieee80211com *ic = &sc->sc_ic;
5345 	struct iwm_cmd_response *cresp;
5346 	struct mbuf *m1;
5347 	uint32_t offset = 0;
5348 	uint32_t maxoff = IWM_RBUF_SIZE;
5349 	uint32_t nextoff;
5350 	boolean_t stolen = FALSE;
5351 
5352 #define HAVEROOM(a)	\
5353     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5354 
5355 	while (HAVEROOM(offset)) {
5356 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5357 		    offset);
5358 		int qid, idx, code, len;
5359 
5360 		qid = pkt->hdr.qid;
5361 		idx = pkt->hdr.idx;
5362 
5363 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5364 
5365 		/*
5366 		 * randomly get these from the firmware, no idea why.
5367 		 * they at least seem harmless, so just ignore them for now
5368 		 */
5369 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5370 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5371 			break;
5372 		}
5373 
5374 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5375 		    "rx packet qid=%d idx=%d type=%x\n",
5376 		    qid & ~0x80, pkt->hdr.idx, code);
5377 
5378 		len = iwm_rx_packet_len(pkt);
5379 		len += sizeof(uint32_t); /* account for status word */
5380 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5381 
5382 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5383 
5384 		switch (code) {
5385 		case IWM_REPLY_RX_PHY_CMD:
5386 			iwm_rx_rx_phy_cmd(sc, pkt);
5387 			break;
5388 
5389 		case IWM_REPLY_RX_MPDU_CMD: {
5390 			/*
5391 			 * If this is the last frame in the RX buffer, we
5392 			 * can directly feed the mbuf to the sharks here.
5393 			 */
5394 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5395 			    struct iwm_rx_packet *, nextoff);
5396 			if (!HAVEROOM(nextoff) ||
5397 			    (nextpkt->hdr.code == 0 &&
5398 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5399 			     nextpkt->hdr.idx == 0) ||
5400 			    (nextpkt->len_n_flags ==
5401 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5402 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5403 					stolen = FALSE;
5404 					/* Make sure we abort the loop */
5405 					nextoff = maxoff;
5406 				}
5407 				break;
5408 			}
5409 
5410 			/*
5411 			 * Use m_copym instead of m_split, because that
5412 			 * makes it easier to keep a valid rx buffer in
5413 			 * the ring, when iwm_rx_mpdu() fails.
5414 			 *
5415 			 * We need to start m_copym() at offset 0, to get the
5416 			 * M_PKTHDR flag preserved.
5417 			 */
5418 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5419 			if (m1) {
5420 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5421 					stolen = TRUE;
5422 				else
5423 					m_freem(m1);
5424 			}
5425 			break;
5426 		}
5427 
5428 		case IWM_TX_CMD:
5429 			iwm_rx_tx_cmd(sc, pkt);
5430 			break;
5431 
5432 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5433 			struct iwm_missed_beacons_notif *resp;
5434 			int missed;
5435 
5436 			/* XXX look at mac_id to determine interface ID */
5437 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5438 
5439 			resp = (void *)pkt->data;
5440 			missed = le32toh(resp->consec_missed_beacons);
5441 
5442 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5443 			    "%s: MISSED_BEACON: mac_id=%d, "
5444 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5445 			    "num_rx=%d\n",
5446 			    __func__,
5447 			    le32toh(resp->mac_id),
5448 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5449 			    le32toh(resp->consec_missed_beacons),
5450 			    le32toh(resp->num_expected_beacons),
5451 			    le32toh(resp->num_recvd_beacons));
5452 
5453 			/* Be paranoid */
5454 			if (vap == NULL)
5455 				break;
5456 
5457 			/* XXX no net80211 locking? */
5458 			if (vap->iv_state == IEEE80211_S_RUN &&
5459 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5460 				if (missed > vap->iv_bmissthreshold) {
5461 					/* XXX bad locking; turn into task */
5462 					IWM_UNLOCK(sc);
5463 					ieee80211_beacon_miss(ic);
5464 					IWM_LOCK(sc);
5465 				}
5466 			}
5467 
5468 			break;
5469 		}
5470 
5471 		case IWM_MFUART_LOAD_NOTIFICATION:
5472 			break;
5473 
5474 		case IWM_ALIVE:
5475 			break;
5476 
5477 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5478 			break;
5479 
5480 		case IWM_STATISTICS_NOTIFICATION:
5481 			iwm_handle_rx_statistics(sc, pkt);
5482 			break;
5483 
5484 		case IWM_NVM_ACCESS_CMD:
5485 		case IWM_MCC_UPDATE_CMD:
5486 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5487 				memcpy(sc->sc_cmd_resp,
5488 				    pkt, sizeof(sc->sc_cmd_resp));
5489 			}
5490 			break;
5491 
5492 		case IWM_MCC_CHUB_UPDATE_CMD: {
5493 			struct iwm_mcc_chub_notif *notif;
5494 			notif = (void *)pkt->data;
5495 
5496 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5497 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5498 			sc->sc_fw_mcc[2] = '\0';
5499 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5500 			    "fw source %d sent CC '%s'\n",
5501 			    notif->source_id, sc->sc_fw_mcc);
5502 			break;
5503 		}
5504 
5505 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5506 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5507 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5508 			struct iwm_dts_measurement_notif_v1 *notif;
5509 
5510 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5511 				device_printf(sc->sc_dev,
5512 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5513 				break;
5514 			}
5515 			notif = (void *)pkt->data;
5516 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5517 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5518 			    notif->temp);
5519 			break;
5520 		}
5521 
5522 		case IWM_PHY_CONFIGURATION_CMD:
5523 		case IWM_TX_ANT_CONFIGURATION_CMD:
5524 		case IWM_ADD_STA:
5525 		case IWM_MAC_CONTEXT_CMD:
5526 		case IWM_REPLY_SF_CFG_CMD:
5527 		case IWM_POWER_TABLE_CMD:
5528 		case IWM_LTR_CONFIG:
5529 		case IWM_PHY_CONTEXT_CMD:
5530 		case IWM_BINDING_CONTEXT_CMD:
5531 		case IWM_TIME_EVENT_CMD:
5532 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5533 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5534 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5535 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5536 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5537 		case IWM_REPLY_BEACON_FILTERING_CMD:
5538 		case IWM_MAC_PM_POWER_TABLE:
5539 		case IWM_TIME_QUOTA_CMD:
5540 		case IWM_REMOVE_STA:
5541 		case IWM_TXPATH_FLUSH:
5542 		case IWM_LQ_CMD:
5543 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5544 				 IWM_FW_PAGING_BLOCK_CMD):
5545 		case IWM_BT_CONFIG:
5546 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5547 			cresp = (void *)pkt->data;
5548 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5549 				memcpy(sc->sc_cmd_resp,
5550 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5551 			}
5552 			break;
5553 
5554 		/* ignore */
5555 		case IWM_PHY_DB_CMD:
5556 			break;
5557 
5558 		case IWM_INIT_COMPLETE_NOTIF:
5559 			break;
5560 
5561 		case IWM_SCAN_OFFLOAD_COMPLETE:
5562 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5563 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5564 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5565 				ieee80211_runtask(ic, &sc->sc_es_task);
5566 			}
5567 			break;
5568 
5569 		case IWM_SCAN_ITERATION_COMPLETE: {
5570 			struct iwm_lmac_scan_complete_notif *notif;
5571 			notif = (void *)pkt->data;
5572 			break;
5573 		}
5574 
5575 		case IWM_SCAN_COMPLETE_UMAC:
5576 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5577 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5578 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5579 				ieee80211_runtask(ic, &sc->sc_es_task);
5580 			}
5581 			break;
5582 
5583 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5584 			struct iwm_umac_scan_iter_complete_notif *notif;
5585 			notif = (void *)pkt->data;
5586 
5587 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5588 			    "complete, status=0x%x, %d channels scanned\n",
5589 			    notif->status, notif->scanned_channels);
5590 			break;
5591 		}
5592 
5593 		case IWM_REPLY_ERROR: {
5594 			struct iwm_error_resp *resp;
5595 			resp = (void *)pkt->data;
5596 
5597 			device_printf(sc->sc_dev,
5598 			    "firmware error 0x%x, cmd 0x%x\n",
5599 			    le32toh(resp->error_type),
5600 			    resp->cmd_id);
5601 			break;
5602 		}
5603 
5604 		case IWM_TIME_EVENT_NOTIFICATION:
5605 			iwm_rx_time_event_notif(sc, pkt);
5606 			break;
5607 
5608 		/*
5609 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5610 		 * messages. Just ignore them for now.
5611 		 */
5612 		case IWM_DEBUG_LOG_MSG:
5613 			break;
5614 
5615 		case IWM_MCAST_FILTER_CMD:
5616 			break;
5617 
5618 		case IWM_SCD_QUEUE_CFG: {
5619 			struct iwm_scd_txq_cfg_rsp *rsp;
5620 			rsp = (void *)pkt->data;
5621 
5622 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5623 			    "queue cfg token=0x%x sta_id=%d "
5624 			    "tid=%d scd_queue=%d\n",
5625 			    rsp->token, rsp->sta_id, rsp->tid,
5626 			    rsp->scd_queue);
5627 			break;
5628 		}
5629 
5630 		default:
5631 			device_printf(sc->sc_dev,
5632 			    "code %x, frame %d/%d %x unhandled\n",
5633 			    code, qid & ~0x80, idx, pkt->len_n_flags);
5634 			break;
5635 		}
5636 
5637 		/*
5638 		 * Why test bit 0x80?  The Linux driver:
5639 		 *
5640 		 * There is one exception:  uCode sets bit 15 when it
5641 		 * originates the response/notification, i.e. when the
5642 		 * response/notification is not a direct response to a
5643 		 * command sent by the driver.  For example, uCode issues
5644 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5645 		 * it is not a direct response to any driver command.
5646 		 *
5647 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5648 		 * uses a slightly different format for pkt->hdr, and "qid"
5649 		 * is actually the upper byte of a two-byte field.
5650 		 */
5651 		if (!(qid & (1 << 7)))
5652 			iwm_cmd_done(sc, pkt);
5653 
5654 		offset = nextoff;
5655 	}
5656 	if (stolen)
5657 		m_freem(m);
5658 #undef HAVEROOM
5659 }
5660 
5661 /*
5662  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5663  * Basic structure from if_iwn
5664  */
5665 static void
5666 iwm_notif_intr(struct iwm_softc *sc)
5667 {
5668 	int count;
5669 	uint32_t wreg;
5670 	uint16_t hw;
5671 
5672 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5673 	    BUS_DMASYNC_POSTREAD);
5674 
5675 	if (sc->cfg->mqrx_supported) {
5676 		count = IWM_RX_MQ_RING_COUNT;
5677 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5678 	} else {
5679 		count = IWM_RX_LEGACY_RING_COUNT;
5680 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5681 	}
5682 
5683 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5684 
5685 	/*
5686 	 * Process responses
5687 	 */
5688 	while (sc->rxq.cur != hw) {
5689 		struct iwm_rx_ring *ring = &sc->rxq;
5690 		struct iwm_rx_data *data = &ring->data[ring->cur];
5691 
5692 		bus_dmamap_sync(ring->data_dmat, data->map,
5693 		    BUS_DMASYNC_POSTREAD);
5694 
5695 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5696 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5697 		iwm_handle_rxb(sc, data->m);
5698 
5699 		ring->cur = (ring->cur + 1) % count;
5700 	}
5701 
5702 	/*
5703 	 * Tell the firmware that it can reuse the ring entries that
5704 	 * we have just processed.
5705 	 * Seems like the hardware gets upset unless we align
5706 	 * the write by 8??
5707 	 */
5708 	hw = (hw == 0) ? count - 1 : hw - 1;
5709 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5710 }
5711 
5712 static void
5713 iwm_intr(void *arg)
5714 {
5715 	struct iwm_softc *sc = arg;
5716 	int handled = 0;
5717 	int r1, r2, rv = 0;
5718 	int isperiodic = 0;
5719 
5720 	IWM_LOCK(sc);
5721 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5722 
5723 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5724 		uint32_t *ict = sc->ict_dma.vaddr;
5725 		int tmp;
5726 
5727 		tmp = htole32(ict[sc->ict_cur]);
5728 		if (!tmp)
5729 			goto out_ena;
5730 
5731 		/*
5732 		 * ok, there was something.  keep plowing until we have all.
5733 		 */
5734 		r1 = r2 = 0;
5735 		while (tmp) {
5736 			r1 |= tmp;
5737 			ict[sc->ict_cur] = 0;
5738 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5739 			tmp = htole32(ict[sc->ict_cur]);
5740 		}
5741 
5742 		/* this is where the fun begins.  don't ask */
5743 		if (r1 == 0xffffffff)
5744 			r1 = 0;
5745 
5746 		/* i am not expected to understand this */
5747 		if (r1 & 0xc0000)
5748 			r1 |= 0x8000;
5749 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5750 	} else {
5751 		r1 = IWM_READ(sc, IWM_CSR_INT);
5752 		/* "hardware gone" (where, fishing?) */
5753 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5754 			goto out;
5755 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5756 	}
5757 	if (r1 == 0 && r2 == 0) {
5758 		goto out_ena;
5759 	}
5760 
5761 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5762 
5763 	/* Safely ignore these bits for debug checks below */
5764 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5765 
5766 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5767 		int i;
5768 		struct ieee80211com *ic = &sc->sc_ic;
5769 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5770 
5771 #ifdef IWM_DEBUG
5772 		iwm_nic_error(sc);
5773 #endif
5774 		/* Dump driver status (TX and RX rings) while we're here. */
5775 		device_printf(sc->sc_dev, "driver status:\n");
5776 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5777 			struct iwm_tx_ring *ring = &sc->txq[i];
5778 			device_printf(sc->sc_dev,
5779 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5780 			    "queued=%-3d\n",
5781 			    i, ring->qid, ring->cur, ring->queued);
5782 		}
5783 		device_printf(sc->sc_dev,
5784 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5785 		device_printf(sc->sc_dev,
5786 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5787 
5788 		/* Reset our firmware state tracking. */
5789 		sc->sc_firmware_state = 0;
5790 		/* Don't stop the device; just do a VAP restart */
5791 		IWM_UNLOCK(sc);
5792 
5793 		if (vap == NULL) {
5794 			printf("%s: null vap\n", __func__);
5795 			return;
5796 		}
5797 
5798 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5799 		    "restarting\n", __func__, vap->iv_state);
5800 
5801 		ieee80211_restart_all(ic);
5802 		return;
5803 	}
5804 
5805 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5806 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5807 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5808 		iwm_stop(sc);
5809 		rv = 1;
5810 		goto out;
5811 	}
5812 
5813 	/* firmware chunk loaded */
5814 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5815 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5816 		handled |= IWM_CSR_INT_BIT_FH_TX;
5817 		sc->sc_fw_chunk_done = 1;
5818 		wakeup(&sc->sc_fw);
5819 	}
5820 
5821 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5822 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5823 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5824 	}
5825 
5826 	/*
5827 	 * The Linux driver uses periodic interrupts to avoid races.
5828 	 * We cargo-cult like it's going out of fashion.
5829 	 */
5830 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5831 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5832 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5833 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5834 			IWM_WRITE_1(sc,
5835 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5836 		isperiodic = 1;
5837 	}
5838 
5839 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5840 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5841 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5842 
5843 		iwm_notif_intr(sc);
5844 
5845 		/* enable periodic interrupt, see above */
5846 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5847 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5848 			    IWM_CSR_INT_PERIODIC_ENA);
5849 	}
5850 
5851 	if (__predict_false(r1 & ~handled))
5852 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5853 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5854 	rv = 1;
5855 
5856  out_ena:
5857 	iwm_restore_interrupts(sc);
5858  out:
5859 	IWM_UNLOCK(sc);
5860 	return;
5861 }
5862 
5863 /*
5864  * Autoconf glue-sniffing
5865  */
5866 #define	PCI_VENDOR_INTEL		0x8086
5867 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5868 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5869 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5870 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5871 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5872 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5873 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5874 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5875 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5876 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5877 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5878 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5879 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5880 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5881 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
5882 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5883 
5884 static const struct iwm_devices {
5885 	uint16_t		device;
5886 	const struct iwm_cfg	*cfg;
5887 } iwm_devices[] = {
5888 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5889 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5890 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5891 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5892 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5893 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5894 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5895 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5896 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5897 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5898 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5899 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5900 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5901 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5902 	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5903 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5904 };
5905 
5906 static int
5907 iwm_probe(device_t dev)
5908 {
5909 	int i;
5910 
5911 	for (i = 0; i < nitems(iwm_devices); i++) {
5912 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5913 		    pci_get_device(dev) == iwm_devices[i].device) {
5914 			device_set_desc(dev, iwm_devices[i].cfg->name);
5915 			return (BUS_PROBE_DEFAULT);
5916 		}
5917 	}
5918 
5919 	return (ENXIO);
5920 }
5921 
5922 static int
5923 iwm_dev_check(device_t dev)
5924 {
5925 	struct iwm_softc *sc;
5926 	uint16_t devid;
5927 	int i;
5928 
5929 	sc = device_get_softc(dev);
5930 
5931 	devid = pci_get_device(dev);
5932 	for (i = 0; i < nitems(iwm_devices); i++) {
5933 		if (iwm_devices[i].device == devid) {
5934 			sc->cfg = iwm_devices[i].cfg;
5935 			return (0);
5936 		}
5937 	}
5938 	device_printf(dev, "unknown adapter type\n");
5939 	return ENXIO;
5940 }
5941 
5942 /* PCI registers */
5943 #define PCI_CFG_RETRY_TIMEOUT	0x041
5944 
5945 static int
5946 iwm_pci_attach(device_t dev)
5947 {
5948 	struct iwm_softc *sc;
5949 	int count, error, rid;
5950 	uint16_t reg;
5951 
5952 	sc = device_get_softc(dev);
5953 
5954 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5955 	 * PCI Tx retries from interfering with C3 CPU state */
5956 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5957 
5958 	/* Enable bus-mastering and hardware bug workaround. */
5959 	pci_enable_busmaster(dev);
5960 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5961 	/* if !MSI */
5962 	if (reg & PCIM_STATUS_INTxSTATE) {
5963 		reg &= ~PCIM_STATUS_INTxSTATE;
5964 	}
5965 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5966 
5967 	rid = PCIR_BAR(0);
5968 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5969 	    RF_ACTIVE);
5970 	if (sc->sc_mem == NULL) {
5971 		device_printf(sc->sc_dev, "can't map mem space\n");
5972 		return (ENXIO);
5973 	}
5974 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5975 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5976 
5977 	/* Install interrupt handler. */
5978 	count = 1;
5979 	rid = 0;
5980 	if (pci_alloc_msi(dev, &count) == 0)
5981 		rid = 1;
5982 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5983 	    (rid != 0 ? 0 : RF_SHAREABLE));
5984 	if (sc->sc_irq == NULL) {
5985 		device_printf(dev, "can't map interrupt\n");
5986 			return (ENXIO);
5987 	}
5988 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5989 	    NULL, iwm_intr, sc, &sc->sc_ih);
5990 	if (sc->sc_ih == NULL) {
5991 		device_printf(dev, "can't establish interrupt");
5992 			return (ENXIO);
5993 	}
5994 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5995 
5996 	return (0);
5997 }
5998 
5999 static void
6000 iwm_pci_detach(device_t dev)
6001 {
6002 	struct iwm_softc *sc = device_get_softc(dev);
6003 
6004 	if (sc->sc_irq != NULL) {
6005 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6006 		bus_release_resource(dev, SYS_RES_IRQ,
6007 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6008 		pci_release_msi(dev);
6009         }
6010 	if (sc->sc_mem != NULL)
6011 		bus_release_resource(dev, SYS_RES_MEMORY,
6012 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6013 }
6014 
6015 static int
6016 iwm_attach(device_t dev)
6017 {
6018 	struct iwm_softc *sc = device_get_softc(dev);
6019 	struct ieee80211com *ic = &sc->sc_ic;
6020 	int error;
6021 	int txq_i, i;
6022 
6023 	sc->sc_dev = dev;
6024 	sc->sc_attached = 1;
6025 	IWM_LOCK_INIT(sc);
6026 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6027 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6028 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
6029 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6030 	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
6031 
6032 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
6033 	    taskqueue_thread_enqueue, &sc->sc_tq);
6034 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
6035 	if (error != 0) {
6036 		device_printf(dev, "can't start taskq thread, error %d\n",
6037 		    error);
6038 		goto fail;
6039 	}
6040 
6041 	error = iwm_dev_check(dev);
6042 	if (error != 0)
6043 		goto fail;
6044 
6045 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6046 	if (sc->sc_notif_wait == NULL) {
6047 		device_printf(dev, "failed to init notification wait struct\n");
6048 		goto fail;
6049 	}
6050 
6051 	sc->sf_state = IWM_SF_UNINIT;
6052 
6053 	/* Init phy db */
6054 	sc->sc_phy_db = iwm_phy_db_init(sc);
6055 	if (!sc->sc_phy_db) {
6056 		device_printf(dev, "Cannot init phy_db\n");
6057 		goto fail;
6058 	}
6059 
6060 	/* Set EBS as successful as long as not stated otherwise by the FW. */
6061 	sc->last_ebs_successful = TRUE;
6062 
6063 	/* PCI attach */
6064 	error = iwm_pci_attach(dev);
6065 	if (error != 0)
6066 		goto fail;
6067 
6068 	sc->sc_wantresp = -1;
6069 
6070 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6071 	/*
6072 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6073 	 * changed, and now the revision step also includes bit 0-1 (no more
6074 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6075 	 * in the old format.
6076 	 */
6077 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6078 		int ret;
6079 		uint32_t hw_step;
6080 
6081 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6082 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6083 
6084 		if (iwm_prepare_card_hw(sc) != 0) {
6085 			device_printf(dev, "could not initialize hardware\n");
6086 			goto fail;
6087 		}
6088 
6089 		/*
6090 		 * In order to recognize C step the driver should read the
6091 		 * chip version id located at the AUX bus MISC address.
6092 		 */
6093 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6094 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6095 		DELAY(2);
6096 
6097 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6098 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6099 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6100 				   25000);
6101 		if (!ret) {
6102 			device_printf(sc->sc_dev,
6103 			    "Failed to wake up the nic\n");
6104 			goto fail;
6105 		}
6106 
6107 		if (iwm_nic_lock(sc)) {
6108 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6109 			hw_step |= IWM_ENABLE_WFPM;
6110 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6111 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6112 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6113 			if (hw_step == 0x3)
6114 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6115 						(IWM_SILICON_C_STEP << 2);
6116 			iwm_nic_unlock(sc);
6117 		} else {
6118 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6119 			goto fail;
6120 		}
6121 	}
6122 
6123 	/* special-case 7265D, it has the same PCI IDs. */
6124 	if (sc->cfg == &iwm7265_cfg &&
6125 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6126 		sc->cfg = &iwm7265d_cfg;
6127 	}
6128 
6129 	/* Allocate DMA memory for firmware transfers. */
6130 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6131 		device_printf(dev, "could not allocate memory for firmware\n");
6132 		goto fail;
6133 	}
6134 
6135 	/* Allocate "Keep Warm" page. */
6136 	if ((error = iwm_alloc_kw(sc)) != 0) {
6137 		device_printf(dev, "could not allocate keep warm page\n");
6138 		goto fail;
6139 	}
6140 
6141 	/* We use ICT interrupts */
6142 	if ((error = iwm_alloc_ict(sc)) != 0) {
6143 		device_printf(dev, "could not allocate ICT table\n");
6144 		goto fail;
6145 	}
6146 
6147 	/* Allocate TX scheduler "rings". */
6148 	if ((error = iwm_alloc_sched(sc)) != 0) {
6149 		device_printf(dev, "could not allocate TX scheduler rings\n");
6150 		goto fail;
6151 	}
6152 
6153 	/* Allocate TX rings */
6154 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6155 		if ((error = iwm_alloc_tx_ring(sc,
6156 		    &sc->txq[txq_i], txq_i)) != 0) {
6157 			device_printf(dev,
6158 			    "could not allocate TX ring %d\n",
6159 			    txq_i);
6160 			goto fail;
6161 		}
6162 	}
6163 
6164 	/* Allocate RX ring. */
6165 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6166 		device_printf(dev, "could not allocate RX ring\n");
6167 		goto fail;
6168 	}
6169 
6170 	/* Clear pending interrupts. */
6171 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6172 
6173 	ic->ic_softc = sc;
6174 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6175 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6176 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6177 
6178 	/* Set device capabilities. */
6179 	ic->ic_caps =
6180 	    IEEE80211_C_STA |
6181 	    IEEE80211_C_WPA |		/* WPA/RSN */
6182 	    IEEE80211_C_WME |
6183 	    IEEE80211_C_PMGT |
6184 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6185 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6186 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6187 	    ;
6188 	/* Advertise full-offload scanning */
6189 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6190 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6191 		sc->sc_phyctxt[i].id = i;
6192 		sc->sc_phyctxt[i].color = 0;
6193 		sc->sc_phyctxt[i].ref = 0;
6194 		sc->sc_phyctxt[i].channel = NULL;
6195 	}
6196 
6197 	/* Default noise floor */
6198 	sc->sc_noise = -96;
6199 
6200 	/* Max RSSI */
6201 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6202 
6203 #ifdef IWM_DEBUG
6204 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6205 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6206 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6207 #endif
6208 
6209 	error = iwm_read_firmware(sc);
6210 	if (error) {
6211 		goto fail;
6212 	} else if (sc->sc_fw.fw_fp == NULL) {
6213 		/*
6214 		 * XXX Add a solution for properly deferring firmware load
6215 		 *     during bootup.
6216 		 */
6217 		goto fail;
6218 	} else {
6219 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6220 		sc->sc_preinit_hook.ich_arg = sc;
6221 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6222 			device_printf(dev,
6223 			    "config_intrhook_establish failed\n");
6224 			goto fail;
6225 		}
6226 	}
6227 
6228 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6229 	    "<-%s\n", __func__);
6230 
6231 	return 0;
6232 
6233 	/* Free allocated memory if something failed during attachment. */
6234 fail:
6235 	iwm_detach_local(sc, 0);
6236 
6237 	return ENXIO;
6238 }
6239 
6240 static int
6241 iwm_is_valid_ether_addr(uint8_t *addr)
6242 {
6243 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6244 
6245 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6246 		return (FALSE);
6247 
6248 	return (TRUE);
6249 }
6250 
6251 static int
6252 iwm_wme_update(struct ieee80211com *ic)
6253 {
6254 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6255 	struct iwm_softc *sc = ic->ic_softc;
6256 	struct chanAccParams chp;
6257 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6258 	struct iwm_vap *ivp = IWM_VAP(vap);
6259 	struct iwm_node *in;
6260 	struct wmeParams tmp[WME_NUM_AC];
6261 	int aci, error;
6262 
6263 	if (vap == NULL)
6264 		return (0);
6265 
6266 	ieee80211_wme_ic_getparams(ic, &chp);
6267 
6268 	IEEE80211_LOCK(ic);
6269 	for (aci = 0; aci < WME_NUM_AC; aci++)
6270 		tmp[aci] = chp.cap_wmeParams[aci];
6271 	IEEE80211_UNLOCK(ic);
6272 
6273 	IWM_LOCK(sc);
6274 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6275 		const struct wmeParams *ac = &tmp[aci];
6276 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6277 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6278 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6279 		ivp->queue_params[aci].edca_txop =
6280 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6281 	}
6282 	ivp->have_wme = TRUE;
6283 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6284 		in = IWM_NODE(vap->iv_bss);
6285 		if (in->in_assoc) {
6286 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6287 				device_printf(sc->sc_dev,
6288 				    "%s: failed to update MAC\n", __func__);
6289 			}
6290 		}
6291 	}
6292 	IWM_UNLOCK(sc);
6293 
6294 	return (0);
6295 #undef IWM_EXP2
6296 }
6297 
6298 static void
6299 iwm_preinit(void *arg)
6300 {
6301 	struct iwm_softc *sc = arg;
6302 	device_t dev = sc->sc_dev;
6303 	struct ieee80211com *ic = &sc->sc_ic;
6304 	int error;
6305 
6306 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6307 	    "->%s\n", __func__);
6308 
6309 	IWM_LOCK(sc);
6310 	if ((error = iwm_start_hw(sc)) != 0) {
6311 		device_printf(dev, "could not initialize hardware\n");
6312 		IWM_UNLOCK(sc);
6313 		goto fail;
6314 	}
6315 
6316 	error = iwm_run_init_ucode(sc, 1);
6317 	iwm_stop_device(sc);
6318 	if (error) {
6319 		IWM_UNLOCK(sc);
6320 		goto fail;
6321 	}
6322 	device_printf(dev,
6323 	    "hw rev 0x%x, fw ver %s, address %s\n",
6324 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6325 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6326 
6327 	/* not all hardware can do 5GHz band */
6328 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6329 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6330 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6331 	IWM_UNLOCK(sc);
6332 
6333 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6334 	    ic->ic_channels);
6335 
6336 	/*
6337 	 * At this point we've committed - if we fail to do setup,
6338 	 * we now also have to tear down the net80211 state.
6339 	 */
6340 	ieee80211_ifattach(ic);
6341 	ic->ic_vap_create = iwm_vap_create;
6342 	ic->ic_vap_delete = iwm_vap_delete;
6343 	ic->ic_raw_xmit = iwm_raw_xmit;
6344 	ic->ic_node_alloc = iwm_node_alloc;
6345 	ic->ic_scan_start = iwm_scan_start;
6346 	ic->ic_scan_end = iwm_scan_end;
6347 	ic->ic_update_mcast = iwm_update_mcast;
6348 	ic->ic_getradiocaps = iwm_init_channel_map;
6349 	ic->ic_set_channel = iwm_set_channel;
6350 	ic->ic_scan_curchan = iwm_scan_curchan;
6351 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6352 	ic->ic_wme.wme_update = iwm_wme_update;
6353 	ic->ic_parent = iwm_parent;
6354 	ic->ic_transmit = iwm_transmit;
6355 	iwm_radiotap_attach(sc);
6356 	if (bootverbose)
6357 		ieee80211_announce(ic);
6358 
6359 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6360 	    "<-%s\n", __func__);
6361 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6362 
6363 	return;
6364 fail:
6365 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6366 	iwm_detach_local(sc, 0);
6367 }
6368 
6369 /*
6370  * Attach the interface to 802.11 radiotap.
6371  */
6372 static void
6373 iwm_radiotap_attach(struct iwm_softc *sc)
6374 {
6375         struct ieee80211com *ic = &sc->sc_ic;
6376 
6377 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6378 	    "->%s begin\n", __func__);
6379         ieee80211_radiotap_attach(ic,
6380             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6381                 IWM_TX_RADIOTAP_PRESENT,
6382             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6383                 IWM_RX_RADIOTAP_PRESENT);
6384 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6385 	    "->%s end\n", __func__);
6386 }
6387 
6388 static struct ieee80211vap *
6389 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6390     enum ieee80211_opmode opmode, int flags,
6391     const uint8_t bssid[IEEE80211_ADDR_LEN],
6392     const uint8_t mac[IEEE80211_ADDR_LEN])
6393 {
6394 	struct iwm_vap *ivp;
6395 	struct ieee80211vap *vap;
6396 
6397 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6398 		return NULL;
6399 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6400 	vap = &ivp->iv_vap;
6401 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6402 	vap->iv_bmissthreshold = 10;            /* override default */
6403 	/* Override with driver methods. */
6404 	ivp->iv_newstate = vap->iv_newstate;
6405 	vap->iv_newstate = iwm_newstate;
6406 
6407 	ivp->id = IWM_DEFAULT_MACID;
6408 	ivp->color = IWM_DEFAULT_COLOR;
6409 
6410 	ivp->have_wme = FALSE;
6411 	ivp->ps_disabled = FALSE;
6412 
6413 	ieee80211_ratectl_init(vap);
6414 	/* Complete setup. */
6415 	ieee80211_vap_attach(vap, ieee80211_media_change,
6416 	    ieee80211_media_status, mac);
6417 	ic->ic_opmode = opmode;
6418 
6419 	return vap;
6420 }
6421 
6422 static void
6423 iwm_vap_delete(struct ieee80211vap *vap)
6424 {
6425 	struct iwm_vap *ivp = IWM_VAP(vap);
6426 
6427 	ieee80211_ratectl_deinit(vap);
6428 	ieee80211_vap_detach(vap);
6429 	free(ivp, M_80211_VAP);
6430 }
6431 
6432 static void
6433 iwm_xmit_queue_drain(struct iwm_softc *sc)
6434 {
6435 	struct mbuf *m;
6436 	struct ieee80211_node *ni;
6437 
6438 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6439 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6440 		ieee80211_free_node(ni);
6441 		m_freem(m);
6442 	}
6443 }
6444 
6445 static void
6446 iwm_scan_start(struct ieee80211com *ic)
6447 {
6448 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6449 	struct iwm_softc *sc = ic->ic_softc;
6450 	int error;
6451 
6452 	IWM_LOCK(sc);
6453 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6454 		/* This should not be possible */
6455 		device_printf(sc->sc_dev,
6456 		    "%s: Previous scan not completed yet\n", __func__);
6457 	}
6458 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6459 		error = iwm_umac_scan(sc);
6460 	else
6461 		error = iwm_lmac_scan(sc);
6462 	if (error != 0) {
6463 		device_printf(sc->sc_dev, "could not initiate scan\n");
6464 		IWM_UNLOCK(sc);
6465 		ieee80211_cancel_scan(vap);
6466 	} else {
6467 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6468 		iwm_led_blink_start(sc);
6469 		IWM_UNLOCK(sc);
6470 	}
6471 }
6472 
6473 static void
6474 iwm_scan_end(struct ieee80211com *ic)
6475 {
6476 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6477 	struct iwm_softc *sc = ic->ic_softc;
6478 
6479 	IWM_LOCK(sc);
6480 	iwm_led_blink_stop(sc);
6481 	if (vap->iv_state == IEEE80211_S_RUN)
6482 		iwm_led_enable(sc);
6483 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6484 		/*
6485 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6486 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6487 		 * taskqueue.
6488 		 */
6489 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6490 		iwm_scan_stop_wait(sc);
6491 	}
6492 	IWM_UNLOCK(sc);
6493 
6494 	/*
6495 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6496 	 * This is to make sure that it won't call ieee80211_scan_done
6497 	 * when we have already started the next scan.
6498 	 */
6499 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6500 }
6501 
6502 static void
6503 iwm_update_mcast(struct ieee80211com *ic)
6504 {
6505 }
6506 
6507 static void
6508 iwm_set_channel(struct ieee80211com *ic)
6509 {
6510 }
6511 
6512 static void
6513 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6514 {
6515 }
6516 
6517 static void
6518 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6519 {
6520 }
6521 
6522 void
6523 iwm_init_task(void *arg1)
6524 {
6525 	struct iwm_softc *sc = arg1;
6526 
6527 	IWM_LOCK(sc);
6528 	while (sc->sc_flags & IWM_FLAG_BUSY)
6529 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6530 	sc->sc_flags |= IWM_FLAG_BUSY;
6531 	iwm_stop(sc);
6532 	if (sc->sc_ic.ic_nrunning > 0)
6533 		iwm_init(sc);
6534 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6535 	wakeup(&sc->sc_flags);
6536 	IWM_UNLOCK(sc);
6537 }
6538 
6539 static int
6540 iwm_resume(device_t dev)
6541 {
6542 	struct iwm_softc *sc = device_get_softc(dev);
6543 	int do_reinit = 0;
6544 
6545 	/*
6546 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6547 	 * PCI Tx retries from interfering with C3 CPU state.
6548 	 */
6549 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6550 
6551 	if (!sc->sc_attached)
6552 		return 0;
6553 
6554 	iwm_init_task(device_get_softc(dev));
6555 
6556 	IWM_LOCK(sc);
6557 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6558 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6559 		do_reinit = 1;
6560 	}
6561 	IWM_UNLOCK(sc);
6562 
6563 	if (do_reinit)
6564 		ieee80211_resume_all(&sc->sc_ic);
6565 
6566 	return 0;
6567 }
6568 
6569 static int
6570 iwm_suspend(device_t dev)
6571 {
6572 	int do_stop = 0;
6573 	struct iwm_softc *sc = device_get_softc(dev);
6574 
6575 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6576 
6577 	if (!sc->sc_attached)
6578 		return (0);
6579 
6580 	ieee80211_suspend_all(&sc->sc_ic);
6581 
6582 	if (do_stop) {
6583 		IWM_LOCK(sc);
6584 		iwm_stop(sc);
6585 		sc->sc_flags |= IWM_FLAG_SCANNING;
6586 		IWM_UNLOCK(sc);
6587 	}
6588 
6589 	return (0);
6590 }
6591 
6592 static int
6593 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6594 {
6595 	struct iwm_fw_info *fw = &sc->sc_fw;
6596 	device_t dev = sc->sc_dev;
6597 	int i;
6598 
6599 	if (!sc->sc_attached)
6600 		return 0;
6601 	sc->sc_attached = 0;
6602 	if (do_net80211) {
6603 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6604 	}
6605 	iwm_stop_device(sc);
6606 	taskqueue_drain_all(sc->sc_tq);
6607 	taskqueue_free(sc->sc_tq);
6608 	if (do_net80211) {
6609 		IWM_LOCK(sc);
6610 		iwm_xmit_queue_drain(sc);
6611 		IWM_UNLOCK(sc);
6612 		ieee80211_ifdetach(&sc->sc_ic);
6613 	}
6614 	callout_drain(&sc->sc_led_blink_to);
6615 	callout_drain(&sc->sc_watchdog_to);
6616 
6617 	iwm_phy_db_free(sc->sc_phy_db);
6618 	sc->sc_phy_db = NULL;
6619 
6620 	iwm_free_nvm_data(sc->nvm_data);
6621 
6622 	/* Free descriptor rings */
6623 	iwm_free_rx_ring(sc, &sc->rxq);
6624 	for (i = 0; i < nitems(sc->txq); i++)
6625 		iwm_free_tx_ring(sc, &sc->txq[i]);
6626 
6627 	/* Free firmware */
6628 	if (fw->fw_fp != NULL)
6629 		iwm_fw_info_free(fw);
6630 
6631 	/* Free scheduler */
6632 	iwm_dma_contig_free(&sc->sched_dma);
6633 	iwm_dma_contig_free(&sc->ict_dma);
6634 	iwm_dma_contig_free(&sc->kw_dma);
6635 	iwm_dma_contig_free(&sc->fw_dma);
6636 
6637 	iwm_free_fw_paging(sc);
6638 
6639 	/* Finished with the hardware - detach things */
6640 	iwm_pci_detach(dev);
6641 
6642 	if (sc->sc_notif_wait != NULL) {
6643 		iwm_notification_wait_free(sc->sc_notif_wait);
6644 		sc->sc_notif_wait = NULL;
6645 	}
6646 
6647 	IWM_LOCK_DESTROY(sc);
6648 
6649 	return (0);
6650 }
6651 
6652 static int
6653 iwm_detach(device_t dev)
6654 {
6655 	struct iwm_softc *sc = device_get_softc(dev);
6656 
6657 	return (iwm_detach_local(sc, 1));
6658 }
6659 
6660 static device_method_t iwm_pci_methods[] = {
6661         /* Device interface */
6662         DEVMETHOD(device_probe,         iwm_probe),
6663         DEVMETHOD(device_attach,        iwm_attach),
6664         DEVMETHOD(device_detach,        iwm_detach),
6665         DEVMETHOD(device_suspend,       iwm_suspend),
6666         DEVMETHOD(device_resume,        iwm_resume),
6667 
6668         DEVMETHOD_END
6669 };
6670 
6671 static driver_t iwm_pci_driver = {
6672         "iwm",
6673         iwm_pci_methods,
6674         sizeof (struct iwm_softc)
6675 };
6676 
6677 static devclass_t iwm_devclass;
6678 
6679 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6680 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6681     iwm_devices, nitems(iwm_devices));
6682 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6683 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6684 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6685