xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 3332f1b444d4a73238e9f59cca27bfc95fe936bd)
/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
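
/*
 * Illustrative use (not from the original source): mtodoff() is mtod(9)
 * with an extra byte offset, e.g. a frame header following an RX packet
 * header at the start of an mbuf could be reached with
 *	wh = mtodoff(m, struct ieee80211_frame *, sizeof(struct iwm_rx_packet));
 */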

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
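
/*
 * Note: 'rate' is in net80211's usual units of 500 kb/s, so
 * { 2, IWM_RATE_1M_PLCP } is 1 Mb/s CCK and { 108, IWM_RATE_54M_PLCP }
 * is 54 Mb/s OFDM, while 'plcp' is the matching PLCP signal value the
 * firmware expects.  Entries 0-3 are CCK and 4-11 are OFDM, which is
 * exactly what the IWM_RIDX_IS_CCK()/IWM_RIDX_IS_OFDM() tests encode.
 */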

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
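
/*
 * hz is the kernel tick rate (ticks per second), so the alive and
 * calibration waits above amount to roughly one and two seconds of
 * sleep timeout, respectively.
 */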

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
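
/*
 * Being a boot-time tunable, this is set from the kernel environment,
 * e.g. in loader.conf(5):
 *	hw.iwm.lar.disable="1"
 * to disable location-aware regulatory (LAR) support.
 */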

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* the first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}
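
/*
 * Each firmware section TLV handled above is thus laid out as:
 *
 *	bytes 0..3   little-endian device load offset
 *	bytes 4..N   section data
 *
 * which is why a section must be at least sizeof(uint32_t) bytes long,
 * exactly the dlen check at the top of iwm_firmware_store_section().
 */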

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}
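
/*
 * The API/capability TLVs address a flat bitmap in 32-bit words:
 * api_index selects the word and each set bit i marks feature
 * (i + 32 * api_index).  For example, api_index 1 with bit 3 set
 * enables feature 35.  iwm_set_ucode_capabilities() below applies
 * the same scheme to the capability bitmap.
 */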

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
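
/*
 * The (1 << IWM_ICT_PADDR_SHIFT) alignment above matters because only
 * paddr >> IWM_ICT_PADDR_SHIFT is programmed into IWM_CSR_DRAM_INT_TBL_REG
 * (see iwm_ict_reset() below), so the low bits of the table's physical
 * address must be zero.
 */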

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);
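	/*
	 * Reading the mask layout, the low and high halfwords appear to
	 * enable the free and used BD queues respectively, one bit per
	 * queue (an inference; only bit 0 of each half is used here).
	 */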
1436 
1437 	/* Enable RX DMA, 4KB buffer size. */
1438 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
1439 	    IWM_RFH_DMA_EN_ENABLE_VAL |
1440 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
1441 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
1442 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
1443 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
1444 
1445 	/* Enable RX DMA snooping. */
1446 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
1447 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
1448 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1449 	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1450 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1451 
1452 	/* Enable the configured queue(s). */
1453 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
1454 
1455 	iwm_nic_unlock(sc);
1456 
1457 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1458 
1459 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
1460 
1461 	return (0);
1462 }
1463 
1464 static int
1465 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1466 {
1467 
1468 	/* Stop Rx DMA */
1469 	iwm_pcie_rx_stop(sc);
1470 
1471 	if (!iwm_nic_lock(sc))
1472 		return EBUSY;
1473 
1474 	/* reset and flush pointers */
1475 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1476 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1477 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1478 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1479 
1480 	/* Set physical address of RX ring (256-byte aligned). */
1481 	IWM_WRITE(sc,
1482 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1483 	    sc->rxq.free_desc_dma.paddr >> 8);
1484 
1485 	/* Set physical address of RX status (16-byte aligned). */
1486 	IWM_WRITE(sc,
1487 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1488 
1489 	/* Enable Rx DMA
1490 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1491 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1492 	 *      the credit mechanism in 5000 HW RX FIFO
1493 	 * Direct rx interrupts to hosts
1494 	 * Rx buffer size 4 or 8k or 12k
1495 	 * RB timeout 0x10
1496 	 * 256 RBDs
1497 	 */
1498 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1499 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1500 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1501 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1502 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1503 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1504 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1505 
1506 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1507 
1508 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1509 	if (sc->cfg->host_interrupt_operation_mode)
1510 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1511 
1512 	iwm_nic_unlock(sc);
1513 
1514 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1515 
1516 	return 0;
1517 }
1518 
1519 static int
1520 iwm_nic_rx_init(struct iwm_softc *sc)
1521 {
1522 	if (sc->cfg->mqrx_supported)
1523 		return iwm_nic_rx_mq_init(sc);
1524 	else
1525 		return iwm_nic_rx_legacy_init(sc);
1526 }
1527 
1528 static int
1529 iwm_nic_tx_init(struct iwm_softc *sc)
1530 {
1531 	int qid;
1532 
1533 	if (!iwm_nic_lock(sc))
1534 		return EBUSY;
1535 
1536 	/* Deactivate TX scheduler. */
1537 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1538 
1539 	/* Set physical address of "keep warm" page (16-byte aligned). */
1540 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1541 
1542 	/* Initialize TX rings. */
1543 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1544 		struct iwm_tx_ring *txq = &sc->txq[qid];
1545 
1546 		/* Set physical address of TX ring (256-byte aligned). */
1547 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1548 		    txq->desc_dma.paddr >> 8);
1549 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1550 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1551 		    __func__,
1552 		    qid, txq->desc,
1553 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1554 	}
1555 
1556 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
1557 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
1558 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
1559 
1560 	iwm_nic_unlock(sc);
1561 
1562 	return 0;
1563 }
1564 
1565 static int
1566 iwm_nic_init(struct iwm_softc *sc)
1567 {
1568 	int error;
1569 
1570 	iwm_apm_init(sc);
1571 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1572 		iwm_set_pwr(sc);
1573 
1574 	iwm_nic_config(sc);
1575 
1576 	if ((error = iwm_nic_rx_init(sc)) != 0)
1577 		return error;
1578 
1579 	/*
1580 	 * Ditto for TX, from iwn
1581 	 */
1582 	if ((error = iwm_nic_tx_init(sc)) != 0)
1583 		return error;
1584 
1585 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1586 	    "%s: shadow registers enabled\n", __func__);
1587 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1588 
1589 	return 0;
1590 }
1591 
1592 int
1593 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1594 {
1595 	int qmsk;
1596 
1597 	qmsk = 1 << qid;
1598 
1599 	if (!iwm_nic_lock(sc)) {
1600 		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1601 		    __func__, qid);
1602 		return EBUSY;
1603 	}
1604 
1605 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1606 
1607 	if (qid == IWM_CMD_QUEUE) {
1608 		/* Disable the scheduler. */
1609 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);
1610 
1611 		/* Stop the TX queue prior to configuration. */
1612 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1613 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1614 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1615 
1616 		iwm_nic_unlock(sc);
1617 
1618 		/* Disable aggregations for this queue. */
1619 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);
1620 
1621 		if (!iwm_nic_lock(sc)) {
1622 			device_printf(sc->sc_dev,
1623 			    "%s: cannot enable txq %d\n", __func__, qid);
1624 			return EBUSY;
1625 		}
1626 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1627 		iwm_nic_unlock(sc);
1628 
1629 		iwm_write_mem32(sc,
1630 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1631 		/* Set scheduler window size and frame limit. */
1632 		iwm_write_mem32(sc,
1633 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1634 		    sizeof(uint32_t),
1635 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1636 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1637 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1638 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1639 
1640 		if (!iwm_nic_lock(sc)) {
1641 			device_printf(sc->sc_dev,
1642 			    "%s: cannot enable txq %d\n", __func__, qid);
1643 			return EBUSY;
1644 		}
1645 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1646 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1647 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1648 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1649 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1650 
1651 		/* Enable the scheduler for this queue. */
1652 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
1653 	} else {
1654 		struct iwm_scd_txq_cfg_cmd cmd;
1655 		int error;
1656 
1657 		iwm_nic_unlock(sc);
1658 
1659 		memset(&cmd, 0, sizeof(cmd));
1660 		cmd.scd_queue = qid;
1661 		cmd.enable = 1;
1662 		cmd.sta_id = sta_id;
1663 		cmd.tx_fifo = fifo;
1664 		cmd.aggregate = 0;
1665 		cmd.window = IWM_FRAME_LIMIT;
1666 
1667 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1668 		    sizeof(cmd), &cmd);
1669 		if (error) {
1670 			device_printf(sc->sc_dev,
1671 			    "cannot enable txq %d\n", qid);
1672 			return error;
1673 		}
1674 
1675 		if (!iwm_nic_lock(sc))
1676 			return EBUSY;
1677 	}
1678 
1679 	iwm_nic_unlock(sc);
1680 
1681 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1682 	    __func__, qid, fifo);
1683 
1684 	return 0;
1685 }
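
/*
 * [Editor's note] The scheduler context write in iwm_enable_txq() uses
 * the usual shift-and-mask idiom: each field is shifted to its bit
 * position and masked before being OR'd into a single 32-bit word.  A
 * sketch of the same idiom with hypothetical field positions (window
 * at bit 0, frame limit at bit 16); the real offsets come from the
 * IWM_SCD_QUEUE_CTX_REG2_* macros:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <stdint.h>

#define WIN_POS		0		/* hypothetical positions */
#define WIN_MSK		0x0000007f
#define LIMIT_POS	16
#define LIMIT_MSK	0x007f0000

static uint32_t
pack_scd_reg2(uint32_t win, uint32_t limit)
{
	/* Shift each field into place, then mask off any overflow. */
	return (((win << WIN_POS) & WIN_MSK) |
	    ((limit << LIMIT_POS) & LIMIT_MSK));
}
#endif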
1686 
1687 static int
1688 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1689 {
1690 	int error, chnl;
1691 
1692 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1693 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1694 
1695 	if (!iwm_nic_lock(sc))
1696 		return EBUSY;
1697 
1698 	iwm_ict_reset(sc);
1699 
1700 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1701 	if (scd_base_addr != 0 &&
1702 	    scd_base_addr != sc->scd_base_addr) {
1703 		device_printf(sc->sc_dev,
1704 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1705 		    __func__, scd_base_addr, sc->scd_base_addr);
1706 	}
1707 
1708 	iwm_nic_unlock(sc);
1709 
1710 	/* reset context data, TX status and translation data */
1711 	error = iwm_write_mem(sc,
1712 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1713 	    NULL, clear_dwords);
1714 	if (error)
1715 		return EBUSY;
1716 
1717 	if (!iwm_nic_lock(sc))
1718 		return EBUSY;
1719 
1720 	/* Set physical address of TX scheduler rings (1KB aligned). */
1721 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1722 
1723 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1724 
1725 	iwm_nic_unlock(sc);
1726 
1727 	/* enable command channel */
1728 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1729 	if (error)
1730 		return error;
1731 
1732 	if (!iwm_nic_lock(sc))
1733 		return EBUSY;
1734 
1735 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1736 
1737 	/* Enable DMA channels. */
1738 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1739 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1740 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1741 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1742 	}
1743 
1744 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1745 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1746 
1747 	iwm_nic_unlock(sc);
1748 
1749 	/* Enable L1-Active */
1750 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1751 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1752 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1753 	}
1754 
1755 	return error;
1756 }
1757 
1758 /*
1759  * NVM read access and content parsing.  We do not support
1760  * external NVM or writing NVM.
1761  * iwlwifi/mvm/nvm.c
1762  */
1763 
1764 /* Default NVM size to read */
1765 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1766 
1767 #define IWM_NVM_WRITE_OPCODE 1
1768 #define IWM_NVM_READ_OPCODE 0
1769 
1770 /* load nvm chunk response */
1771 enum {
1772 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1773 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1774 };
1775 
1776 static int
1777 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1778 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1779 {
1780 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1781 		.offset = htole16(offset),
1782 		.length = htole16(length),
1783 		.type = htole16(section),
1784 		.op_code = IWM_NVM_READ_OPCODE,
1785 	};
1786 	struct iwm_nvm_access_resp *nvm_resp;
1787 	struct iwm_rx_packet *pkt;
1788 	struct iwm_host_cmd cmd = {
1789 		.id = IWM_NVM_ACCESS_CMD,
1790 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1791 		.data = { &nvm_access_cmd, },
1792 	};
1793 	int ret, bytes_read, offset_read;
1794 	uint8_t *resp_data;
1795 
1796 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1797 
1798 	ret = iwm_send_cmd(sc, &cmd);
1799 	if (ret) {
1800 		device_printf(sc->sc_dev,
1801 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1802 		return ret;
1803 	}
1804 
1805 	pkt = cmd.resp_pkt;
1806 
1807 	/* Extract NVM response */
1808 	nvm_resp = (void *)pkt->data;
1809 	ret = le16toh(nvm_resp->status);
1810 	bytes_read = le16toh(nvm_resp->length);
1811 	offset_read = le16toh(nvm_resp->offset);
1812 	resp_data = nvm_resp->data;
1813 	if (ret) {
1814 		if ((offset != 0) &&
1815 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1816 			/*
1817 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1818 			 * read a chunk from an address that is a multiple of
1819 			 * 2K and got an error because that address is empty.
1820 			 * Meaning of (offset != 0): the driver has already
1821 			 * read valid data from another chunk, so this case
1822 			 * is not an error.
1823 			 */
1824 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 				    "NVM access command failed at offset 0x%x since that section size is a multiple of 2K\n",
1826 				    offset);
1827 			*len = 0;
1828 			ret = 0;
1829 		} else {
1830 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 				    "NVM access command failed with status %d\n", ret);
1832 			ret = EIO;
1833 		}
1834 		goto exit;
1835 	}
1836 
1837 	if (offset_read != offset) {
1838 		device_printf(sc->sc_dev,
1839 		    "NVM ACCESS response with invalid offset %d\n",
1840 		    offset_read);
1841 		ret = EINVAL;
1842 		goto exit;
1843 	}
1844 
1845 	if (bytes_read > length) {
1846 		device_printf(sc->sc_dev,
1847 		    "NVM ACCESS response with too much data "
1848 		    "(%d bytes requested, %d bytes received)\n",
1849 		    length, bytes_read);
1850 		ret = EINVAL;
1851 		goto exit;
1852 	}
1853 
1854 	/* Copy the NVM data just read into the caller's buffer. */
1855 	memcpy(data + offset, resp_data, bytes_read);
1856 	*len = bytes_read;
1857 
1858  exit:
1859 	iwm_free_resp(sc, &cmd);
1860 	return ret;
1861 }
1862 
1863 /*
1864  * Reads an NVM section completely.
1865  * NICs prior to the 7000 family don't have a real NVM, but just read
1866  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1867  * by the uCode, we must check ourselves in this case that we don't
1868  * overflow and try to read more than the EEPROM size.
1869  * For 7000 family NICs, we supply the maximal size we can read, and
1870  * the uCode fills the response with as much data as fits,
1871  * without overflowing, so no check is needed.
1872  */
1873 static int
1874 iwm_nvm_read_section(struct iwm_softc *sc,
1875 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1876 {
1877 	uint16_t seglen, length, offset = 0;
1878 	int ret;
1879 
1880 	/* Set nvm section read length */
1881 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1882 
1883 	seglen = length;
1884 
1885 	/* Read the NVM until exhausted (reading less than requested) */
1886 	while (seglen == length) {
1887 		/* Check that this read will not overflow the NVM buffer. */
1888 		if ((size_read + offset + length) >
1889 		    sc->cfg->eeprom_size) {
1890 			device_printf(sc->sc_dev,
1891 			    "EEPROM size is too small for NVM\n");
1892 			return ENOBUFS;
1893 		}
1894 
1895 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1896 		if (ret) {
1897 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1898 				    "Cannot read NVM from section %d offset %d, length %d\n",
1899 				    section, offset, length);
1900 			return ret;
1901 		}
1902 		offset += seglen;
1903 	}
1904 
1905 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1906 		    "NVM section %d read completed\n", section);
1907 	*len = offset;
1908 	return 0;
1909 }
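
/*
 * [Editor's note] The loop above terminates on a short read: every
 * iteration requests a full chunk, and the section is exhausted once
 * the device returns fewer bytes than asked for.  The same control
 * flow in isolation, with a hypothetical read_chunk() callback
 * standing in for iwm_nvm_read_chunk():
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <stdint.h>

/* Puts up to `length` bytes at buf + offset; returns the count, or -1. */
typedef int (*read_chunk_fn)(uint16_t offset, uint16_t length, uint8_t *buf);

static int
read_section(read_chunk_fn read_chunk, uint8_t *buf, uint16_t chunk)
{
	uint16_t offset = 0;
	int got = chunk;

	while (got == chunk) {		/* full chunk: more may follow */
		got = read_chunk(offset, chunk, buf);
		if (got < 0)
			return (got);	/* propagate the error */
		offset += got;
	}
	return (offset);		/* total section length */
}
#endif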
1910 
1911 /*
1912  * BEGIN IWM_NVM_PARSE
1913  */
1914 
1915 /* iwlwifi/iwl-nvm-parse.c */
1916 
1917 /*
1918  * Translate EEPROM flags to net80211.
1919  */
1920 static uint32_t
1921 iwm_eeprom_channel_flags(uint16_t ch_flags)
1922 {
1923 	uint32_t nflags;
1924 
1925 	nflags = 0;
1926 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1927 		nflags |= IEEE80211_CHAN_PASSIVE;
1928 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1929 		nflags |= IEEE80211_CHAN_NOADHOC;
1930 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1931 		nflags |= IEEE80211_CHAN_DFS;
1932 		/* Just in case. */
1933 		nflags |= IEEE80211_CHAN_NOADHOC;
1934 	}
1935 
1936 	return (nflags);
1937 }
1938 
1939 static void
1940 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1941     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1942     const uint8_t bands[])
1943 {
1944 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1945 	uint32_t nflags;
1946 	uint16_t ch_flags;
1947 	uint8_t ieee;
1948 	int error;
1949 
1950 	for (; ch_idx < ch_num; ch_idx++) {
1951 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1952 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1953 			ieee = iwm_nvm_channels[ch_idx];
1954 		else
1955 			ieee = iwm_nvm_channels_8000[ch_idx];
1956 
1957 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1958 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1959 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1960 			    ieee, ch_flags,
1961 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1962 			    "5.2" : "2.4");
1963 			continue;
1964 		}
1965 
1966 		nflags = iwm_eeprom_channel_flags(ch_flags);
1967 		error = ieee80211_add_channel(chans, maxchans, nchans,
1968 		    ieee, 0, 0, nflags, bands);
1969 		if (error != 0)
1970 			break;
1971 
1972 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1973 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1974 		    ieee, ch_flags,
1975 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1976 		    "5.2" : "2.4");
1977 	}
1978 }
1979 
1980 static void
1981 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1982     struct ieee80211_channel chans[])
1983 {
1984 	struct iwm_softc *sc = ic->ic_softc;
1985 	struct iwm_nvm_data *data = sc->nvm_data;
1986 	uint8_t bands[IEEE80211_MODE_BYTES];
1987 	size_t ch_num;
1988 
1989 	memset(bands, 0, sizeof(bands));
1990 	/* 1-13: 11b/g channels. */
1991 	setbit(bands, IEEE80211_MODE_11B);
1992 	setbit(bands, IEEE80211_MODE_11G);
1993 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1994 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
1995 
1996 	/* 14: 11b channel only. */
1997 	clrbit(bands, IEEE80211_MODE_11G);
1998 	iwm_add_channel_band(sc, chans, maxchans, nchans,
1999 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2000 
2001 	if (data->sku_cap_band_52GHz_enable) {
2002 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2003 			ch_num = nitems(iwm_nvm_channels);
2004 		else
2005 			ch_num = nitems(iwm_nvm_channels_8000);
2006 		memset(bands, 0, sizeof(bands));
2007 		setbit(bands, IEEE80211_MODE_11A);
2008 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2009 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2010 	}
2011 }
2012 
2013 static void
2014 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2015 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2016 {
2017 	const uint8_t *hw_addr;
2018 
2019 	if (mac_override) {
2020 		static const uint8_t reserved_mac[] = {
2021 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2022 		};
2023 
2024 		hw_addr = (const uint8_t *)(mac_override +
2025 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2026 
2027 		/*
2028 		 * Store the MAC address from MAO section.
2029 		 * No byte swapping is required in MAO section
2030 		 */
2031 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2032 
2033 		/*
2034 		 * Force the use of the OTP MAC address in case of reserved MAC
2035 		 * address in the NVM, or if address is given but invalid.
2036 		 */
2037 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2038 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2039 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2040 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2041 			return;
2042 
2043 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2044 		    "%s: mac address from nvm override section invalid\n",
2045 		    __func__);
2046 	}
2047 
2048 	if (nvm_hw) {
2049 		/* read the mac address from WFMP registers */
2050 		uint32_t mac_addr0 =
2051 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2052 		uint32_t mac_addr1 =
2053 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2054 
2055 		hw_addr = (const uint8_t *)&mac_addr0;
2056 		data->hw_addr[0] = hw_addr[3];
2057 		data->hw_addr[1] = hw_addr[2];
2058 		data->hw_addr[2] = hw_addr[1];
2059 		data->hw_addr[3] = hw_addr[0];
2060 
2061 		hw_addr = (const uint8_t *)&mac_addr1;
2062 		data->hw_addr[4] = hw_addr[1];
2063 		data->hw_addr[5] = hw_addr[0];
2064 
2065 		return;
2066 	}
2067 
2068 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2069 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2070 }
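
/*
 * [Editor's note] The WFMP fallback above stores each PRPH word
 * little-endian (htole32) and then picks its bytes in reverse, which
 * is equivalent to extracting the big-endian bytes of the register
 * value.  A portable restatement with plain shifts, where reg0/reg1
 * stand for the values read from IWM_WFMP_MAC_ADDR_0/_1:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <stdint.h>

static void
wfmp_to_mac(uint32_t reg0, uint32_t reg1, uint8_t mac[6])
{
	mac[0] = (reg0 >> 24) & 0xff;	/* hw_addr[3] of the LE word */
	mac[1] = (reg0 >> 16) & 0xff;
	mac[2] = (reg0 >>  8) & 0xff;
	mac[3] = reg0 & 0xff;
	mac[4] = (reg1 >>  8) & 0xff;
	mac[5] = reg1 & 0xff;
}
#endif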
2071 
2072 static int
2073 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2074 	    const uint16_t *phy_sku)
2075 {
2076 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2077 		return le16_to_cpup(nvm_sw + IWM_SKU);
2078 
2079 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2080 }
2081 
2082 static int
2083 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2084 {
2085 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2086 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2087 	else
2088 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2089 						IWM_NVM_VERSION_8000));
2090 }
2091 
2092 static int
2093 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2094 		  const uint16_t *phy_sku)
2095 {
2096 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2097 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2098 
2099 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2100 }
2101 
2102 static int
2103 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2104 {
2105 	int n_hw_addr;
2106 
2107 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2108 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2109 
2110 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2111 
2112 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2113 }
2114 
2115 static void
2116 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2117 		  uint32_t radio_cfg)
2118 {
2119 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2120 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2121 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2122 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2123 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2124 		return;
2125 	}
2126 
2127 	/* set the radio configuration for family 8000 */
2128 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2129 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2130 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2131 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2132 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2133 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2134 }
2135 
2136 static int
2137 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2138 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2139 {
2140 #ifdef notyet /* for FAMILY 9000 */
2141 	if (cfg->mac_addr_from_csr) {
2142 		iwm_set_hw_address_from_csr(sc, data);
2143 	} else
2144 #endif
2145 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2146 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2147 
2148 		/* The byte order is little endian 16 bit, meaning 214365 */
2149 		data->hw_addr[0] = hw_addr[1];
2150 		data->hw_addr[1] = hw_addr[0];
2151 		data->hw_addr[2] = hw_addr[3];
2152 		data->hw_addr[3] = hw_addr[2];
2153 		data->hw_addr[4] = hw_addr[5];
2154 		data->hw_addr[5] = hw_addr[4];
2155 	} else {
2156 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2157 	}
2158 
2159 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2160 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2161 		return EINVAL;
2162 	}
2163 
2164 	return 0;
2165 }
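
/*
 * [Editor's note] The "214365" comment above describes the pre-8000
 * NVM MAC layout: the six address bytes are stored as three
 * little-endian 16-bit words, so the on-wire order 1-2-3-4-5-6 appears
 * in the NVM as 2-1-4-3-6-5.  A standalone demonstration of the swap:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	/* NVM image bytes: each 16-bit word carries its bytes swapped. */
	const uint8_t nvm[6] = { 0x22, 0x11, 0x44, 0x33, 0x66, 0x55 };
	uint8_t mac[6];

	for (int i = 0; i < 6; i += 2) {	/* swap within each word */
		mac[i] = nvm[i + 1];
		mac[i + 1] = nvm[i];
	}
	assert(memcmp(mac,
	    (const uint8_t[]){ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }, 6) == 0);
	return (0);
}
#endif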
2166 
2167 static struct iwm_nvm_data *
2168 iwm_parse_nvm_data(struct iwm_softc *sc,
2169 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2170 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2171 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2172 {
2173 	struct iwm_nvm_data *data;
2174 	uint32_t sku, radio_cfg;
2175 	uint16_t lar_config;
2176 
2177 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2178 		data = malloc(sizeof(*data) +
2179 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2180 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2181 	} else {
2182 		data = malloc(sizeof(*data) +
2183 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2184 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2185 	}
2186 	if (!data)
2187 		return NULL;
2188 
2189 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2190 
2191 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2192 	iwm_set_radio_cfg(sc, data, radio_cfg);
2193 
2194 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2195 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2196 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2197 	data->sku_cap_11n_enable = 0;
2198 
2199 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2200 
2201 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2202 		/* TODO: use IWL_NVM_EXT */
2203 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2204 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2205 				       IWM_NVM_LAR_OFFSET_8000;
2206 
2207 		lar_config = le16_to_cpup(regulatory + lar_offset);
2208 		data->lar_enabled = !!(lar_config &
2209 				       IWM_NVM_LAR_ENABLED_8000);
2210 	}
2211 
2212 	/* If no valid mac address was found - bail out */
2213 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2214 		free(data, M_DEVBUF);
2215 		return NULL;
2216 	}
2217 
2218 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2219 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2220 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2221 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2222 	} else {
2223 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2224 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2225 	}
2226 
2227 	return data;
2228 }
2229 
2230 static void
2231 iwm_free_nvm_data(struct iwm_nvm_data *data)
2232 {
2233 	if (data != NULL)
2234 		free(data, M_DEVBUF);
2235 }
2236 
2237 static struct iwm_nvm_data *
2238 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2239 {
2240 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2241 
2242 	/* Checking for required sections */
2243 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2244 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2245 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2246 			device_printf(sc->sc_dev,
2247 			    "Can't parse empty OTP/NVM sections\n");
2248 			return NULL;
2249 		}
2250 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2251 		/* SW and REGULATORY sections are mandatory */
2252 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2253 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2254 			device_printf(sc->sc_dev,
2255 			    "Can't parse empty OTP/NVM sections\n");
2256 			return NULL;
2257 		}
2258 		/* MAC_OVERRIDE or at least HW section must exist */
2259 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2260 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2261 			device_printf(sc->sc_dev,
2262 			    "Can't parse mac_address, empty sections\n");
2263 			return NULL;
2264 		}
2265 
2266 		/* PHY_SKU section is mandatory in B0 */
2267 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2268 			device_printf(sc->sc_dev,
2269 			    "Can't parse phy_sku in B0, empty sections\n");
2270 			return NULL;
2271 		}
2272 	} else {
2273 		panic("unknown device family %d\n", sc->cfg->device_family);
2274 	}
2275 
2276 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2277 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2278 	calib = (const uint16_t *)
2279 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2280 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2281 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2282 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2283 	mac_override = (const uint16_t *)
2284 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2285 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2286 
2287 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2288 	    phy_sku, regulatory);
2289 }
2290 
2291 static int
2292 iwm_nvm_init(struct iwm_softc *sc)
2293 {
2294 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2295 	int i, ret, section;
2296 	uint32_t size_read = 0;
2297 	uint8_t *nvm_buffer, *temp;
2298 	uint16_t len;
2299 
2300 	memset(nvm_sections, 0, sizeof(nvm_sections));
2301 
2302 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
2303 		return EINVAL;
2304 
2305 	/* Load NVM values from the NIC via the firmware NVM access command. */
2307 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2308 
2309 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2310 	if (!nvm_buffer)
2311 		return ENOMEM;
2312 	for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
2313 		/* Read the section into the scratch buffer first. */
2314 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2315 					   &len, size_read);
2316 		if (ret)
2317 			continue;
2318 		size_read += len;
2319 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2320 		if (!temp) {
2321 			ret = ENOMEM;
2322 			break;
2323 		}
2324 		memcpy(temp, nvm_buffer, len);
2325 
2326 		nvm_sections[section].data = temp;
2327 		nvm_sections[section].length = len;
2328 	}
2329 	if (!size_read)
2330 		device_printf(sc->sc_dev, "OTP is blank\n");
2331 	free(nvm_buffer, M_DEVBUF);
2332 
2333 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2334 	if (!sc->nvm_data)
2335 		return EINVAL;
2336 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2337 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2338 
2339 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2340 		if (nvm_sections[i].data != NULL)
2341 			free(nvm_sections[i].data, M_DEVBUF);
2342 	}
2343 
2344 	return 0;
2345 }
2346 
2347 static int
2348 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2349 	const struct iwm_fw_desc *section)
2350 {
2351 	struct iwm_dma_info *dma = &sc->fw_dma;
2352 	uint8_t *v_addr;
2353 	bus_addr_t p_addr;
2354 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2355 	int ret = 0;
2356 
2357 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2358 		    "%s: [%d] uCode section being loaded...\n",
2359 		    __func__, section_num);
2360 
2361 	v_addr = dma->vaddr;
2362 	p_addr = dma->paddr;
2363 
2364 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2365 		uint32_t copy_size, dst_addr;
2366 		int extended_addr = FALSE;
2367 
2368 		copy_size = MIN(chunk_sz, section->len - offset);
2369 		dst_addr = section->offset + offset;
2370 
2371 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2372 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2373 			extended_addr = TRUE;
2374 
2375 		if (extended_addr)
2376 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2377 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2378 
2379 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2380 		    copy_size);
2381 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2382 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2383 						   copy_size);
2384 
2385 		if (extended_addr)
2386 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2387 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2388 
2389 		if (ret) {
2390 			device_printf(sc->sc_dev,
2391 			    "%s: Could not load the [%d] uCode section\n",
2392 			    __func__, section_num);
2393 			break;
2394 		}
2395 	}
2396 
2397 	return ret;
2398 }
2399 
2400 /*
2401  * ucode
2402  */
2403 static int
2404 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2405 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2406 {
2407 	sc->sc_fw_chunk_done = 0;
2408 
2409 	if (!iwm_nic_lock(sc))
2410 		return EBUSY;
2411 
2412 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2413 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2414 
2415 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2416 	    dst_addr);
2417 
2418 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2419 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2420 
2421 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2422 	    (iwm_get_dma_hi_addr(phy_addr)
2423 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2424 
2425 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2426 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2427 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2428 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2429 
2430 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2431 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2432 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2433 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2434 
2435 	iwm_nic_unlock(sc);
2436 
2437 	/* wait up to 5s for this segment to load */
2438 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2439 
2440 	if (!sc->sc_fw_chunk_done) {
2441 		device_printf(sc->sc_dev,
2442 		    "fw chunk addr 0x%x len %d failed to load\n",
2443 		    dst_addr, byte_cnt);
2444 		return ETIMEDOUT;
2445 	}
2446 
2447 	return 0;
2448 }
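
/*
 * [Editor's note] The TFDIB programming above splits the chunk's DMA
 * address across two registers: the low 32 bits go into CTRL0, and the
 * remaining high bits are packed into CTRL1 together with the byte
 * count.  A sketch of that split, hypothetically placing 4 high
 * address bits at bit 28 of CTRL1; the real layout is what
 * iwm_get_dma_hi_addr() and IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT
 * encode:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <stdint.h>

struct tfdib_regs {
	uint32_t ctrl0;		/* DRAM address, low 32 bits */
	uint32_t ctrl1;		/* high address bits | byte count */
};

static struct tfdib_regs
split_dma_addr(uint64_t paddr, uint32_t byte_cnt)
{
	struct tfdib_regs r;

	r.ctrl0 = (uint32_t)paddr;			/* bits 31:0 */
	r.ctrl1 = ((uint32_t)(paddr >> 32) << 28) | byte_cnt; /* 35:32 */
	return (r);
}
#endif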
2449 
2450 static int
2451 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2452 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2453 {
2454 	int shift_param;
2455 	int i, ret = 0, sec_num = 0x1;
2456 	uint32_t val, last_read_idx = 0;
2457 
2458 	if (cpu == 1) {
2459 		shift_param = 0;
2460 		*first_ucode_section = 0;
2461 	} else {
2462 		shift_param = 16;
2463 		(*first_ucode_section)++;
2464 	}
2465 
2466 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2467 		last_read_idx = i;
2468 
2469 		/*
2470 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2471 		 * CPU1 sections from the CPU2 sections.
2472 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2473 		 * non-paged sections from the CPU2 paging sections.
2474 		 */
2475 		if (!image->sec[i].data ||
2476 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2477 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2478 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2479 				    "Stopping load: invalid data or empty section, sec = %d\n",
2480 				    i);
2481 			break;
2482 		}
2483 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2484 		if (ret)
2485 			return ret;
2486 
2487 		/* Notify the ucode of the loaded section number and status */
2488 		if (iwm_nic_lock(sc)) {
2489 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2490 			val = val | (sec_num << shift_param);
2491 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2492 			sec_num = (sec_num << 1) | 0x1;
2493 			iwm_nic_unlock(sc);
2494 		}
2495 	}
2496 
2497 	*first_ucode_section = last_read_idx;
2498 
2499 	iwm_enable_interrupts(sc);
2500 
2501 	if (iwm_nic_lock(sc)) {
2502 		if (cpu == 1)
2503 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2504 		else
2505 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2506 		iwm_nic_unlock(sc);
2507 	}
2508 
2509 	return 0;
2510 }
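
/*
 * [Editor's note] The load-status handshake above grows sec_num as a
 * ones mask -- 0x1, 0x3, 0x7, ... -- so after N sections the low N
 * bits are set, and shift_param moves the whole mask into the upper
 * half of the register for CPU2.  The progression in isolation:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t sec_num = 0x1, status = 0;
	int shift_param = 16;			/* CPU2 uses the high half */

	for (int i = 0; i < 3; i++) {		/* three loaded sections */
		status |= sec_num << shift_param;
		sec_num = (sec_num << 1) | 0x1;
	}
	assert(status == 0x00070000);		/* low 3 bits, shifted up */
	return (0);
}
#endif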
2511 
2512 static int
2513 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2514 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2515 {
2516 	int shift_param;
2517 	int i, ret = 0;
2518 	uint32_t last_read_idx = 0;
2519 
2520 	if (cpu == 1) {
2521 		shift_param = 0;
2522 		*first_ucode_section = 0;
2523 	} else {
2524 		shift_param = 16;
2525 		(*first_ucode_section)++;
2526 	}
2527 
2528 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2529 		last_read_idx = i;
2530 
2531 		/*
2532 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2533 		 * CPU1 sections from the CPU2 sections.
2534 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2535 		 * non-paged sections from the CPU2 paging sections.
2536 		 */
2537 		if (!image->sec[i].data ||
2538 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2539 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2540 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2541 				    "Stopping load: invalid data or empty section, sec = %d\n",
2542 				     i);
2543 			break;
2544 		}
2545 
2546 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2547 		if (ret)
2548 			return ret;
2549 	}
2550 
2551 	*first_ucode_section = last_read_idx;
2552 
2553 	return 0;
2554 
2556 
2557 static int
2558 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2559 {
2560 	int ret = 0;
2561 	int first_ucode_section;
2562 
2563 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2564 		     image->is_dual_cpus ? "Dual" : "Single");
2565 
2566 	/* load to FW the binary non secured sections of CPU1 */
2567 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2568 	if (ret)
2569 		return ret;
2570 
2571 	if (image->is_dual_cpus) {
2572 		/* set CPU2 header address */
2573 		if (iwm_nic_lock(sc)) {
2574 			iwm_write_prph(sc,
2575 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2576 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2577 			iwm_nic_unlock(sc);
2578 		}
2579 
2580 		/* load to FW the binary sections of CPU2 */
2581 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2582 						 &first_ucode_section);
2583 		if (ret)
2584 			return ret;
2585 	}
2586 
2587 	iwm_enable_interrupts(sc);
2588 
2589 	/* release CPU reset */
2590 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2591 
2592 	return 0;
2593 }
2594 
2595 int
2596 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2597 	const struct iwm_fw_img *image)
2598 {
2599 	int ret = 0;
2600 	int first_ucode_section;
2601 
2602 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2603 		    image->is_dual_cpus ? "Dual" : "Single");
2604 
2605 	/* configure the ucode to be ready to get the secured image */
2606 	/* release CPU reset */
2607 	if (iwm_nic_lock(sc)) {
2608 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2609 		    IWM_RELEASE_CPU_RESET_BIT);
2610 		iwm_nic_unlock(sc);
2611 	}
2612 
2613 	/* load to FW the binary Secured sections of CPU1 */
2614 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2615 	    &first_ucode_section);
2616 	if (ret)
2617 		return ret;
2618 
2619 	/* load to FW the binary sections of CPU2 */
2620 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2621 	    &first_ucode_section);
2622 }
2623 
2624 /* XXX Get rid of this definition */
2625 static inline void
2626 iwm_enable_fw_load_int(struct iwm_softc *sc)
2627 {
2628 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2629 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2630 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2631 }
2632 
2633 /* XXX Add proper rfkill support code */
2634 static int
2635 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2636 {
2637 	int ret;
2638 
2639 	/* This may fail if AMT took ownership of the device */
2640 	if (iwm_prepare_card_hw(sc)) {
2641 		device_printf(sc->sc_dev,
2642 		    "%s: Exit HW not ready\n", __func__);
2643 		ret = EIO;
2644 		goto out;
2645 	}
2646 
2647 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2648 
2649 	iwm_disable_interrupts(sc);
2650 
2651 	/* make sure rfkill handshake bits are cleared */
2652 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2653 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2654 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2655 
2656 	/* clear (again), then enable host interrupts */
2657 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2658 
2659 	ret = iwm_nic_init(sc);
2660 	if (ret) {
2661 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2662 		goto out;
2663 	}
2664 
2665 	/*
2666 	 * Now we load the firmware and don't want to be interrupted, even
2667 	 * by the RF-Kill interrupt (hence we mask all interrupts except the
2668 	 * FH_TX interrupt, which is needed to load the firmware).  If the
2669 	 * RF-Kill switch is toggled, we will find out after having loaded
2670 	 * the firmware and return the proper value to the caller.
2671 	 */
2672 	iwm_enable_fw_load_int(sc);
2673 
2674 	/* really make sure rfkill handshake bits are cleared */
2675 	/* maybe we should write a few times more?  just to make sure */
2676 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2677 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2678 
2679 	/* Load the given image to the HW */
2680 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2681 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2682 	else
2683 		ret = iwm_pcie_load_given_ucode(sc, fw);
2684 
2685 	/* XXX re-check RF-Kill state */
2686 
2687 out:
2688 	return ret;
2689 }
2690 
2691 static int
2692 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2693 {
2694 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2695 		.valid = htole32(valid_tx_ant),
2696 	};
2697 
2698 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2699 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2700 }
2701 
2702 /* iwlwifi: mvm/fw.c */
2703 static int
2704 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2705 {
2706 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2707 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2708 
2709 	/* Set parameters */
2710 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2711 	phy_cfg_cmd.calib_control.event_trigger =
2712 	    sc->sc_default_calib[ucode_type].event_trigger;
2713 	phy_cfg_cmd.calib_control.flow_trigger =
2714 	    sc->sc_default_calib[ucode_type].flow_trigger;
2715 
2716 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2717 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2718 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2719 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2720 }
2721 
2722 static int
2723 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2724 {
2725 	struct iwm_alive_data *alive_data = data;
2726 	struct iwm_alive_resp_v3 *palive3;
2727 	struct iwm_alive_resp *palive;
2728 	struct iwm_umac_alive *umac;
2729 	struct iwm_lmac_alive *lmac1;
2730 	struct iwm_lmac_alive *lmac2 = NULL;
2731 	uint16_t status;
2732 
2733 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2734 		palive = (void *)pkt->data;
2735 		umac = &palive->umac_data;
2736 		lmac1 = &palive->lmac_data[0];
2737 		lmac2 = &palive->lmac_data[1];
2738 		status = le16toh(palive->status);
2739 	} else {
2740 		palive3 = (void *)pkt->data;
2741 		umac = &palive3->umac_data;
2742 		lmac1 = &palive3->lmac_data;
2743 		status = le16toh(palive3->status);
2744 	}
2745 
2746 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2747 	if (lmac2)
2748 		sc->error_event_table[1] =
2749 			le32toh(lmac2->error_event_table_ptr);
2750 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2751 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2752 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2753 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2754 	if (sc->umac_error_event_table)
2755 		sc->support_umac_log = TRUE;
2756 
2757 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2758 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2759 		    status, lmac1->ver_type, lmac1->ver_subtype);
2760 
2761 	if (lmac2)
2762 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2763 
2764 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2765 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2766 		    le32toh(umac->umac_major),
2767 		    le32toh(umac->umac_minor));
2768 
2769 	return TRUE;
2770 }
2771 
2772 static int
2773 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2774 	struct iwm_rx_packet *pkt, void *data)
2775 {
2776 	struct iwm_phy_db *phy_db = data;
2777 
2778 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2779 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2780 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2781 			    __func__, pkt->hdr.code);
2782 		}
2783 		return TRUE;
2784 	}
2785 
2786 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2787 		device_printf(sc->sc_dev,
2788 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2789 	}
2790 
2791 	return FALSE;
2792 }
2793 
2794 static int
2795 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2796 	enum iwm_ucode_type ucode_type)
2797 {
2798 	struct iwm_notification_wait alive_wait;
2799 	struct iwm_alive_data alive_data;
2800 	const struct iwm_fw_img *fw;
2801 	enum iwm_ucode_type old_type = sc->cur_ucode;
2802 	int error;
2803 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2804 
2805 	fw = &sc->sc_fw.img[ucode_type];
2806 	sc->cur_ucode = ucode_type;
2807 	sc->ucode_loaded = FALSE;
2808 
2809 	memset(&alive_data, 0, sizeof(alive_data));
2810 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2811 				   alive_cmd, nitems(alive_cmd),
2812 				   iwm_alive_fn, &alive_data);
2813 
2814 	error = iwm_start_fw(sc, fw);
2815 	if (error) {
2816 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2817 		sc->cur_ucode = old_type;
2818 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2819 		return error;
2820 	}
2821 
2822 	/*
2823 	 * Some things may run in the background now, but we
2824 	 * just wait for the ALIVE notification here.
2825 	 */
2826 	IWM_UNLOCK(sc);
2827 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2828 				      IWM_UCODE_ALIVE_TIMEOUT);
2829 	IWM_LOCK(sc);
2830 	if (error) {
2831 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2832 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2833 			if (iwm_nic_lock(sc)) {
2834 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2835 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2836 				iwm_nic_unlock(sc);
2837 			}
2838 			device_printf(sc->sc_dev,
2839 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2840 			    a, b);
2841 		}
2842 		sc->cur_ucode = old_type;
2843 		return error;
2844 	}
2845 
2846 	if (!alive_data.valid) {
2847 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2848 		    __func__);
2849 		sc->cur_ucode = old_type;
2850 		return EIO;
2851 	}
2852 
2853 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2854 
2855 	/*
2856 	 * Configure and operate the fw paging mechanism.
2857 	 * The driver configures the paging flow only once; the CPU2 paging
2858 	 * image is included in the IWM_UCODE_INIT image.
2859 	 */
2860 	if (fw->paging_mem_size) {
2861 		error = iwm_save_fw_paging(sc, fw);
2862 		if (error) {
2863 			device_printf(sc->sc_dev,
2864 			    "%s: failed to save the FW paging image\n",
2865 			    __func__);
2866 			return error;
2867 		}
2868 
2869 		error = iwm_send_paging_cmd(sc, fw);
2870 		if (error) {
2871 			device_printf(sc->sc_dev,
2872 			    "%s: failed to send the paging cmd\n", __func__);
2873 			iwm_free_fw_paging(sc);
2874 			return error;
2875 		}
2876 	}
2877 
2878 	if (!error)
2879 		sc->ucode_loaded = TRUE;
2880 	return error;
2881 }
2882 
2883 /*
2884  * mvm misc bits
2885  */
2886 
2887 /*
2888  * follows iwlwifi/fw.c
2889  */
2890 static int
2891 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2892 {
2893 	struct iwm_notification_wait calib_wait;
2894 	static const uint16_t init_complete[] = {
2895 		IWM_INIT_COMPLETE_NOTIF,
2896 		IWM_CALIB_RES_NOTIF_PHY_DB
2897 	};
2898 	int ret;
2899 
2900 	/* do not operate with rfkill switch turned on */
2901 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2902 		device_printf(sc->sc_dev,
2903 		    "radio is disabled by hardware switch\n");
2904 		return EPERM;
2905 	}
2906 
2907 	iwm_init_notification_wait(sc->sc_notif_wait,
2908 				   &calib_wait,
2909 				   init_complete,
2910 				   nitems(init_complete),
2911 				   iwm_wait_phy_db_entry,
2912 				   sc->sc_phy_db);
2913 
2914 	/* Will also start the device */
2915 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2916 	if (ret) {
2917 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2918 		    ret);
2919 		goto error;
2920 	}
2921 
2922 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2923 		ret = iwm_send_bt_init_conf(sc);
2924 		if (ret) {
2925 			device_printf(sc->sc_dev,
2926 			    "failed to send bt coex configuration: %d\n", ret);
2927 			goto error;
2928 		}
2929 	}
2930 
2931 	if (justnvm) {
2932 		/* Read nvm */
2933 		ret = iwm_nvm_init(sc);
2934 		if (ret) {
2935 			device_printf(sc->sc_dev, "failed to read nvm\n");
2936 			goto error;
2937 		}
2938 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2939 		goto error;
2940 	}
2941 
2942 	/* Send TX valid antennas before triggering calibrations */
2943 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
2944 	if (ret) {
2945 		device_printf(sc->sc_dev,
2946 		    "failed to send antennas before calibration: %d\n", ret);
2947 		goto error;
2948 	}
2949 
2950 	/*
2951 	 * Send the phy configuration command to the init uCode
2952 	 * to start the uCode init image's internal calibrations.
2953 	 */
2954 	ret = iwm_send_phy_cfg_cmd(sc);
2955 	if (ret) {
2956 		device_printf(sc->sc_dev,
2957 		    "%s: Failed to run INIT calibrations: %d\n",
2958 		    __func__, ret);
2959 		goto error;
2960 	}
2961 
2962 	/*
2963 	 * Nothing to do but wait for the init complete notification
2964 	 * from the firmware.
2965 	 */
2966 	IWM_UNLOCK(sc);
2967 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2968 	    IWM_UCODE_CALIB_TIMEOUT);
2969 	IWM_LOCK(sc);
2970 
2972 	goto out;
2973 
2974 error:
2975 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2976 out:
2977 	return ret;
2978 }
2979 
2980 static int
2981 iwm_config_ltr(struct iwm_softc *sc)
2982 {
2983 	struct iwm_ltr_config_cmd cmd = {
2984 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
2985 	};
2986 
2987 	if (!sc->sc_ltr_enabled)
2988 		return 0;
2989 
2990 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
2991 }
2992 
2993 /*
2994  * receive side
2995  */
2996 
2997 /* (re)stock rx ring, called at init-time and at runtime */
2998 static int
2999 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3000 {
3001 	struct iwm_rx_ring *ring = &sc->rxq;
3002 	struct iwm_rx_data *data = &ring->data[idx];
3003 	struct mbuf *m;
3004 	bus_dmamap_t dmamap;
3005 	bus_dma_segment_t seg;
3006 	int nsegs, error;
3007 
3008 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3009 	if (m == NULL)
3010 		return ENOBUFS;
3011 
3012 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3013 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3014 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3015 	if (error != 0) {
3016 		device_printf(sc->sc_dev,
3017 		    "%s: can't map mbuf, error %d\n", __func__, error);
3018 		m_freem(m);
3019 		return error;
3020 	}
3021 
3022 	if (data->m != NULL)
3023 		bus_dmamap_unload(ring->data_dmat, data->map);
3024 
3025 	/* Swap ring->spare_map with data->map */
3026 	dmamap = data->map;
3027 	data->map = ring->spare_map;
3028 	ring->spare_map = dmamap;
3029 
3030 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3031 	data->m = m;
3032 
3033 	/* Update RX descriptor. */
3034 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3035 	if (sc->cfg->mqrx_supported)
3036 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3037 	else
3038 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3039 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3040 	    BUS_DMASYNC_PREWRITE);
3041 
3042 	return 0;
3043 }
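
/*
 * [Editor's note] The descriptor update above encodes the same DMA
 * address two ways: multi-queue hardware takes the full little-endian
 * 64-bit address, while legacy hardware stores a 32-bit word holding
 * the address shifted right by 8 (hence the 256-byte alignment
 * KASSERT).  Side by side:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <sys/endian.h>		/* htole32()/htole64() on FreeBSD */
#include <stdint.h>

static void
encode_rx_desc(void *desc, int idx, uint64_t paddr, int mqrx)
{
	if (mqrx)	/* multi-queue style: full 64-bit address */
		((uint64_t *)desc)[idx] = htole64(paddr);
	else		/* legacy style: bits 35:8 in a 32-bit word */
		((uint32_t *)desc)[idx] = htole32((uint32_t)(paddr >> 8));
}
#endif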
3044 
3045 static void
3046 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3047 {
3048 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3049 
3050 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3051 
3052 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3053 }
3054 
3055 /*
3056  * Retrieve the average noise (in dBm) among receivers.
3057  */
3058 static int
3059 iwm_get_noise(struct iwm_softc *sc,
3060     const struct iwm_statistics_rx_non_phy *stats)
3061 {
3062 	int i, total, nbant, noise;
3063 
3064 	total = nbant = noise = 0;
3065 	for (i = 0; i < 3; i++) {
3066 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3067 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3068 		    __func__,
3069 		    i,
3070 		    noise);
3071 
3072 		if (noise) {
3073 			total += noise;
3074 			nbant++;
3075 		}
3076 	}
3077 
3078 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3079 	    __func__, nbant, total);
3080 #if 0
3081 	/* There should be at least one antenna but check anyway. */
3082 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3083 #else
3084 	/* For now, just hard-code it to -96 to be safe */
3085 	return (-96);
3086 #endif
3087 }
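
/*
 * [Editor's note] Worked example of the disabled formula above: beacon
 * silence readings of 30, 34 and 0 (third antenna absent) give
 * total = 64 and nbant = 2, so (64 / 2) - 107 = -75 dBm; with no
 * antennas reporting, the guard yields -127 instead of dividing by
 * zero.
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <assert.h>

int
main(void)
{
	int readings[3] = { 30, 34, 0 }, total = 0, nbant = 0;

	for (int i = 0; i < 3; i++) {
		if (readings[i]) {	/* 0 means the antenna is missing */
			total += readings[i];
			nbant++;
		}
	}
	assert(((nbant == 0) ? -127 : (total / nbant) - 107) == -75);
	return (0);
}
#endif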
3088 
3089 static void
3090 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3091 {
3092 	struct iwm_notif_statistics *stats = (void *)&pkt->data;
3093 
3094 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3095 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3096 }
3097 
3098 /* iwlwifi: mvm/rx.c */
3099 /*
3100  * iwm_rx_get_signal_strength - use the new rx PHY INFO API.
3101  * The fw reports values as positive; they must be negated to obtain
3102  * dBm.  Account for missing antennas by replacing 0 values with
3103  * -256 dBm: practically zero power and a non-feasible 8-bit value.
3104  */
3105 static int
3106 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3107     struct iwm_rx_phy_info *phy_info)
3108 {
3109 	int energy_a, energy_b, energy_c, max_energy;
3110 	uint32_t val;
3111 
3112 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3113 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3114 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3115 	energy_a = energy_a ? -energy_a : -256;
3116 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3117 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3118 	energy_b = energy_b ? -energy_b : -256;
3119 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3120 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3121 	energy_c = energy_c ? -energy_c : -256;
3122 	max_energy = MAX(energy_a, energy_b);
3123 	max_energy = MAX(max_energy, energy_c);
3124 
3125 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3126 	    "energy In A %d B %d C %d , and max %d\n",
3127 	    energy_a, energy_b, energy_c, max_energy);
3128 
3129 	return max_energy;
3130 }
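
/*
 * [Editor's note] Worked example of the extraction above, assuming a
 * hypothetical layout with antenna A at bits 7:0 and B at bits 15:8:
 * a reported word of 0x232d yields energies 0x2d (45) and 0x23 (35),
 * i.e. -45 dBm and -35 dBm after negation, and max_energy = -35; a
 * zero field means a missing antenna and maps to -256, not 0 dBm:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t val = 0x232d;		/* B = 0x23, A = 0x2d (assumed) */
	int a = val & 0xff, b = (val >> 8) & 0xff;

	a = a ? -a : -256;		/* 0 = "no antenna", not 0 dBm */
	b = b ? -b : -256;
	assert(a == -45 && b == -35);
	assert((a > b ? a : b) == -35);	/* max_energy */
	return (0);
}
#endif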
3131 
3132 static int
3133 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3134     struct iwm_rx_mpdu_desc *desc)
3135 {
3136 	int energy_a, energy_b;
3137 
3138 	energy_a = desc->v1.energy_a;
3139 	energy_b = desc->v1.energy_b;
3140 	energy_a = energy_a ? -energy_a : -256;
3141 	energy_b = energy_b ? -energy_b : -256;
3142 	return MAX(energy_a, energy_b);
3143 }
3144 
3145 /*
3146  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3147  *
3148  * Handles the actual data of the Rx packet from the fw
3149  */
3150 static bool
3151 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3152     bool stolen)
3153 {
3154 	struct ieee80211com *ic = &sc->sc_ic;
3155 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3156 	struct ieee80211_frame *wh;
3157 	struct ieee80211_rx_stats rxs;
3158 	struct iwm_rx_phy_info *phy_info;
3159 	struct iwm_rx_mpdu_res_start *rx_res;
3160 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3161 	uint32_t len;
3162 	uint32_t rx_pkt_status;
3163 	int rssi;
3164 
3165 	phy_info = &sc->sc_last_phy_info;
3166 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3167 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3168 	len = le16toh(rx_res->byte_count);
3169 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3170 
3171 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3172 		device_printf(sc->sc_dev,
3173 		    "dsp size out of range [0,20]: %d\n",
3174 		    phy_info->cfg_phy_cnt);
3175 		return false;
3176 	}
3177 
3178 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3179 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3180 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3181 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3182 		return false;
3183 	}
3184 
3185 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3186 
3187 	/* Map it to relative value */
3188 	rssi = rssi - sc->sc_noise;
3189 
3190 	/* replenish ring for the buffer we're going to feed to the sharks */
3191 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3192 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3193 		    __func__);
3194 		return false;
3195 	}
3196 
3197 	m->m_data = pkt->data + sizeof(*rx_res);
3198 	m->m_pkthdr.len = m->m_len = len;
3199 
3200 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3201 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3202 
3203 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3204 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3205 	    __func__,
3206 	    le16toh(phy_info->channel),
3207 	    le16toh(phy_info->phy_flags));
3208 
3209 	/*
3210 	 * Populate an RX state struct with the provided information.
3211 	 */
3212 	bzero(&rxs, sizeof(rxs));
3213 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3214 	rxs.r_flags |= IEEE80211_R_BAND;
3215 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3216 	rxs.c_ieee = le16toh(phy_info->channel);
3217 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3218 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3219 		rxs.c_band = IEEE80211_CHAN_2GHZ;
3220 	} else {
3221 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3222 		rxs.c_band = IEEE80211_CHAN_5GHZ;
3223 	}
3224 
3225 	/* rssi is in 1/2 dB units */
3226 	rxs.c_rssi = rssi * 2;
3227 	rxs.c_nf = sc->sc_noise;
3228 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3229 		return false;
3230 
3231 	if (ieee80211_radiotap_active_vap(vap)) {
3232 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3233 
3234 		tap->wr_flags = 0;
3235 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3236 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3237 		tap->wr_chan_freq = htole16(rxs.c_freq);
3238 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3239 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3240 		tap->wr_dbm_antsignal = (int8_t)rssi;
3241 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3242 		tap->wr_tsft = phy_info->system_timestamp;
3243 		switch (phy_info->rate) {
3244 		/* CCK rates. */
3245 		case  10: tap->wr_rate =   2; break;
3246 		case  20: tap->wr_rate =   4; break;
3247 		case  55: tap->wr_rate =  11; break;
3248 		case 110: tap->wr_rate =  22; break;
3249 		/* OFDM rates. */
3250 		case 0xd: tap->wr_rate =  12; break;
3251 		case 0xf: tap->wr_rate =  18; break;
3252 		case 0x5: tap->wr_rate =  24; break;
3253 		case 0x7: tap->wr_rate =  36; break;
3254 		case 0x9: tap->wr_rate =  48; break;
3255 		case 0xb: tap->wr_rate =  72; break;
3256 		case 0x1: tap->wr_rate =  96; break;
3257 		case 0x3: tap->wr_rate = 108; break;
3258 		/* Unknown rate: should not happen. */
3259 		default:  tap->wr_rate =   0;
3260 		}
3261 	}
3262 
3263 	return true;
3264 }
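
/*
 * [Editor's note] The radiotap switch above maps firmware PLCP rate
 * codes to radiotap rates in 500 kb/s units (so 0x1 -> 96 means
 * 48 Mb/s).  The same mapping written as a table, which both MPDU
 * paths repeating this switch could share:
 */
#if 0	/* illustrative sketch only; not part of the driver */
#include <stddef.h>
#include <stdint.h>

static uint8_t
plcp_to_radiotap_rate(uint8_t plcp)
{
	static const struct { uint8_t plcp, rate; } map[] = {
		{  10,   2 }, {  20,   4 }, {  55,  11 }, { 110,  22 }, /* CCK */
		{ 0xd,  12 }, { 0xf,  18 }, { 0x5,  24 }, { 0x7,  36 }, /* OFDM */
		{ 0x9,  48 }, { 0xb,  72 }, { 0x1,  96 }, { 0x3, 108 },
	};

	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].plcp == plcp)
			return (map[i].rate);
	return (0);	/* unknown rate: should not happen */
}
#endif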
3265 
3266 static bool
3267 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3268     bool stolen)
3269 {
3270 	struct ieee80211com *ic = &sc->sc_ic;
3271 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3272 	struct ieee80211_frame *wh;
3273 	struct ieee80211_rx_stats rxs;
3274 	struct iwm_rx_mpdu_desc *desc;
3275 	struct iwm_rx_packet *pkt;
3276 	int rssi;
3277 	uint32_t hdrlen, len, rate_n_flags;
3278 	uint16_t phy_info;
3279 	uint8_t channel;
3280 
3281 	pkt = mtodo(m, offset);
3282 	desc = (void *)pkt->data;
3283 
3284 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3285 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3286 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3287 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3288 		return false;
3289 	}
3290 
3291 	channel = desc->v1.channel;
3292 	len = le16toh(desc->mpdu_len);
3293 	phy_info = le16toh(desc->phy_info);
3294 	rate_n_flags = desc->v1.rate_n_flags;
3295 
3296 	wh = mtodo(m, sizeof(*desc));
3297 	m->m_data = pkt->data + sizeof(*desc);
3298 	m->m_pkthdr.len = m->m_len = len;
3300 
3301 	/* Account for padding following the frame header. */
3302 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3303 		hdrlen = ieee80211_anyhdrsize(wh);
3304 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3305 		m->m_data = mtodo(m, 2);
3306 		wh = mtod(m, struct ieee80211_frame *);
3307 	}
3308 
3309 	/* Map it to relative value */
3310 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3311 	rssi = rssi - sc->sc_noise;
3312 
3313 	/* replenish ring for the buffer we're going to feed to the sharks */
3314 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3315 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3316 		    __func__);
3317 		return false;
3318 	}
3319 
3320 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3321 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3322 
3323 	/*
3324 	 * Populate an RX state struct with the provided information.
3325 	 */
3326 	bzero(&rxs, sizeof(rxs));
3327 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3328 	rxs.r_flags |= IEEE80211_R_BAND;
3329 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3330 	rxs.c_ieee = channel;
3331 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3332 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3333 	rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3334 
3335 	/* rssi is in 1/2 dB units */
3336 	rxs.c_rssi = rssi * 2;
3337 	rxs.c_nf = sc->sc_noise;
3338 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3339 		return false;
3340 
3341 	if (ieee80211_radiotap_active_vap(vap)) {
3342 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3343 
3344 		tap->wr_flags = 0;
3345 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3346 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3347 		tap->wr_chan_freq = htole16(rxs.c_freq);
3348 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3349 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3350 		tap->wr_dbm_antsignal = (int8_t)rssi;
3351 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3352 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3353 		switch ((rate_n_flags & 0xff)) {
3354 		/* CCK rates. */
3355 		case  10: tap->wr_rate =   2; break;
3356 		case  20: tap->wr_rate =   4; break;
3357 		case  55: tap->wr_rate =  11; break;
3358 		case 110: tap->wr_rate =  22; break;
3359 		/* OFDM rates. */
3360 		case 0xd: tap->wr_rate =  12; break;
3361 		case 0xf: tap->wr_rate =  18; break;
3362 		case 0x5: tap->wr_rate =  24; break;
3363 		case 0x7: tap->wr_rate =  36; break;
3364 		case 0x9: tap->wr_rate =  48; break;
3365 		case 0xb: tap->wr_rate =  72; break;
3366 		case 0x1: tap->wr_rate =  96; break;
3367 		case 0x3: tap->wr_rate = 108; break;
3368 		/* Unknown rate: should not happen. */
3369 		default:  tap->wr_rate =   0;
3370 		}
3371 	}
3372 
3373 	return true;
3374 }
3375 
3376 static bool
3377 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3378     bool stolen)
3379 {
3380 	struct epoch_tracker et;
3381 	struct ieee80211com *ic;
3382 	struct ieee80211_frame *wh;
3383 	struct ieee80211_node *ni;
3384 	bool ret;
3385 
3386 	ic = &sc->sc_ic;
3387 
3388 	ret = sc->cfg->mqrx_supported ?
3389 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3390 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3391 	if (!ret) {
3392 		counter_u64_add(ic->ic_ierrors, 1);
3393 		return (ret);
3394 	}
3395 
3396 	wh = mtod(m, struct ieee80211_frame *);
3397 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3398 
3399 	IWM_UNLOCK(sc);
3400 
3401 	NET_EPOCH_ENTER(et);
3402 	if (ni != NULL) {
3403 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3404 		ieee80211_input_mimo(ni, m);
3405 		ieee80211_free_node(ni);
3406 	} else {
3407 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3408 		ieee80211_input_mimo_all(ic, m);
3409 	}
3410 	NET_EPOCH_EXIT(et);
3411 
3412 	IWM_LOCK(sc);
3413 
3414 	return true;
3415 }
3416 
3417 static int
3418 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3419 	struct iwm_node *in)
3420 {
3421 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3422 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3423 	struct ieee80211_node *ni = &in->in_ni;
3424 	struct ieee80211vap *vap = ni->ni_vap;
3425 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3426 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3427 	boolean_t rate_matched;
3428 	uint8_t tx_resp_rate;
3429 
3430 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3431 
3432 	/* Update rate control statistics. */
3433 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3434 	    __func__,
3435 	    (int) le16toh(tx_resp->status.status),
3436 	    (int) le16toh(tx_resp->status.sequence),
3437 	    tx_resp->frame_count,
3438 	    tx_resp->bt_kill_count,
3439 	    tx_resp->failure_rts,
3440 	    tx_resp->failure_frame,
3441 	    le32toh(tx_resp->initial_rate),
3442 	    (int) le16toh(tx_resp->wireless_media_time));
3443 
3444 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3445 
	/* For rate control, ignore frames sent at a different initial rate */
3447 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3448 
3449 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3450 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3451 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3452 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3453 	}
3454 
3455 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3456 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3457 	txs->short_retries = tx_resp->failure_rts;
3458 	txs->long_retries = tx_resp->failure_frame;
3459 	if (status != IWM_TX_STATUS_SUCCESS &&
3460 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3461 		switch (status) {
3462 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3463 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3464 			break;
3465 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3466 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3467 			break;
3468 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3469 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3470 			break;
3471 		default:
3472 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3473 			break;
3474 		}
3475 	} else {
3476 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3477 	}
3478 
3479 	if (rate_matched) {
3480 		ieee80211_ratectl_tx_complete(ni, txs);
3481 
3482 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3483 		new_rate = vap->iv_bss->ni_txrate;
3484 		if (new_rate != 0 && new_rate != cur_rate) {
3485 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3486 			iwm_setrates(sc, in, rix);
3487 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3488 		}
	}
3490 
3491 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3492 }
3493 
3494 static void
3495 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3496 {
3497 	struct iwm_cmd_header *cmd_hdr;
3498 	struct iwm_tx_ring *ring;
3499 	struct iwm_tx_data *txd;
3500 	struct iwm_node *in;
3501 	struct mbuf *m;
3502 	int idx, qid, qmsk, status;
3503 
3504 	cmd_hdr = &pkt->hdr;
3505 	idx = cmd_hdr->idx;
3506 	qid = cmd_hdr->qid;
3507 
3508 	ring = &sc->txq[qid];
3509 	txd = &ring->data[idx];
3510 	in = txd->in;
3511 	m = txd->m;
3512 
3513 	KASSERT(txd->done == 0, ("txd not done"));
3514 	KASSERT(txd->in != NULL, ("txd without node"));
3515 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3516 
3517 	sc->sc_tx_timer = 0;
3518 
3519 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3520 
3521 	/* Unmap and free mbuf. */
3522 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3523 	bus_dmamap_unload(ring->data_dmat, txd->map);
3524 
3525 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3526 	    "free txd %p, in %p\n", txd, txd->in);
3527 	txd->done = 1;
3528 	txd->m = NULL;
3529 	txd->in = NULL;
3530 
3531 	ieee80211_tx_complete(&in->in_ni, m, status);
3532 
3533 	qmsk = 1 << qid;
3534 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3535 		sc->qfullmsk &= ~qmsk;
3536 		if (sc->qfullmsk == 0)
3537 			iwm_start(sc);
3538 	}
3539 }
3540 
3541 /*
3542  * transmit side
3543  */
3544 
3545 /*
 * Process a "command done" firmware notification.  This is where we wake up
3547  * processes waiting for a synchronous command completion.
 * (Adapted from if_iwn.)
3549  */
3550 static void
3551 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3552 {
3553 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3554 	struct iwm_tx_data *data;
3555 
3556 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3557 		return;	/* Not a command ack. */
3558 	}
3559 
3560 	/* XXX wide commands? */
3561 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3562 	    "cmd notification type 0x%x qid %d idx %d\n",
3563 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3564 
3565 	data = &ring->data[pkt->hdr.idx];
3566 
3567 	/* If the command was mapped in an mbuf, free it. */
3568 	if (data->m != NULL) {
3569 		bus_dmamap_sync(ring->data_dmat, data->map,
3570 		    BUS_DMASYNC_POSTWRITE);
3571 		bus_dmamap_unload(ring->data_dmat, data->map);
3572 		m_freem(data->m);
3573 		data->m = NULL;
3574 	}
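	/* Wake up any thread sleeping on this descriptor in iwm_send_cmd(). */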
3575 	wakeup(&ring->desc[pkt->hdr.idx]);
3576 
3577 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3578 		device_printf(sc->sc_dev,
3579 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3580 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3581 		/* XXX call iwm_force_nmi() */
3582 	}
3583 
3584 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3585 	ring->queued--;
3586 	if (ring->queued == 0)
3587 		iwm_pcie_clear_cmd_in_flight(sc);
3588 }
3589 
3590 #if 0
3591 /*
3592  * necessary only for block ack mode
3593  */
3594 void
3595 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3596 	uint16_t len)
3597 {
3598 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3599 	uint16_t w_val;
3600 
3601 	scd_bc_tbl = sc->sched_dma.vaddr;
3602 
3603 	len += 8; /* magic numbers came naturally from paris */
3604 	len = roundup(len, 4) / 4;
3605 
3606 	w_val = htole16(sta_id << 12 | len);
3607 
3608 	/* Update TX scheduler. */
3609 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3610 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3611 	    BUS_DMASYNC_PREWRITE);
3612 
	/*
	 * Entries below IWM_TFD_QUEUE_SIZE_BC_DUP are apparently mirrored
	 * past IWM_TFD_QUEUE_SIZE_MAX so the firmware can read the byte
	 * count table across a queue wrap.
	 */
3614 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3615 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3616 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3617 		    BUS_DMASYNC_PREWRITE);
3618 	}
3619 }
3620 #endif
3621 
3622 static int
3623 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3624 {
3625 	int i;
3626 
3627 	for (i = 0; i < nitems(iwm_rates); i++) {
3628 		if (iwm_rates[i].rate == rate)
3629 			return (i);
3630 	}
3631 	/* XXX error? */
3632 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3633 	    "%s: couldn't find an entry for rate=%d\n",
3634 	    __func__,
3635 	    rate);
3636 	return (0);
3637 }
3638 
3639 /*
3640  * Fill in the rate related information for a transmit command.
3641  */
3642 static const struct iwm_rate *
3643 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3644 	struct mbuf *m, struct iwm_tx_cmd *tx)
3645 {
3646 	struct ieee80211_node *ni = &in->in_ni;
3647 	struct ieee80211_frame *wh;
3648 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3649 	const struct iwm_rate *rinfo;
3650 	int type;
3651 	int ridx, rate_flags;
3652 
3653 	wh = mtod(m, struct ieee80211_frame *);
3654 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3655 
3656 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3657 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3658 
3659 	if (type == IEEE80211_FC0_TYPE_MGT ||
3660 	    type == IEEE80211_FC0_TYPE_CTL ||
3661 	    (m->m_flags & M_EAPOL) != 0) {
3662 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3663 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3664 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3665 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3666 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3667 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3668 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3669 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3670 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3671 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3672 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3673 	} else {
3674 		/* for data frames, use RS table */
3675 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3676 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3677 		if (ridx == -1)
3678 			ridx = 0;
3679 
3680 		/* This is the index into the programmed table */
3681 		tx->initial_rate_index = 0;
3682 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3683 	}
3684 
3685 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3686 	    "%s: frame type=%d txrate %d\n",
3687 	        __func__, type, iwm_rates[ridx].rate);
3688 
3689 	rinfo = &iwm_rates[ridx];
3690 
3691 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3692 	    __func__, ridx,
3693 	    rinfo->rate,
3694 	    !! (IWM_RIDX_IS_CCK(ridx))
3695 	    );
3696 
3697 	/* XXX TODO: hard-coded TX antenna? */
3698 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3699 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3700 	else
3701 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3702 	if (IWM_RIDX_IS_CCK(ridx))
3703 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3704 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3705 
3706 	return rinfo;
3707 }
3708 
3709 #define TB0_SIZE 16
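/*
 * TX buffer layout, as set up below: TB0 carries the first 16 bytes of
 * the TX command, TB1 the rest of the command plus the 802.11 header,
 * and the remaining TBs carry the payload DMA segments.
 */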
3710 static int
3711 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3712 {
3713 	struct ieee80211com *ic = &sc->sc_ic;
3714 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3715 	struct iwm_node *in = IWM_NODE(ni);
3716 	struct iwm_tx_ring *ring;
3717 	struct iwm_tx_data *data;
3718 	struct iwm_tfd *desc;
3719 	struct iwm_device_cmd *cmd;
3720 	struct iwm_tx_cmd *tx;
3721 	struct ieee80211_frame *wh;
3722 	struct ieee80211_key *k = NULL;
3723 	struct mbuf *m1;
3724 	const struct iwm_rate *rinfo;
3725 	uint32_t flags;
3726 	u_int hdrlen;
3727 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3728 	int nsegs;
3729 	uint8_t tid, type;
3730 	int i, totlen, error, pad;
3731 
3732 	wh = mtod(m, struct ieee80211_frame *);
3733 	hdrlen = ieee80211_anyhdrsize(wh);
3734 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3735 	tid = 0;
3736 	ring = &sc->txq[ac];
3737 	desc = &ring->desc[ring->cur];
3738 	data = &ring->data[ring->cur];
3739 
3740 	/* Fill out iwm_tx_cmd to send to the firmware */
3741 	cmd = &ring->cmd[ring->cur];
3742 	cmd->hdr.code = IWM_TX_CMD;
3743 	cmd->hdr.flags = 0;
3744 	cmd->hdr.qid = ring->qid;
3745 	cmd->hdr.idx = ring->cur;
3746 
3747 	tx = (void *)cmd->data;
3748 	memset(tx, 0, sizeof(*tx));
3749 
3750 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3751 
3752 	/* Encrypt the frame if need be. */
3753 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3754 		/* Retrieve key for TX && do software encryption. */
3755 		k = ieee80211_crypto_encap(ni, m);
3756 		if (k == NULL) {
3757 			m_freem(m);
3758 			return (ENOBUFS);
3759 		}
3760 		/* 802.11 header may have moved. */
3761 		wh = mtod(m, struct ieee80211_frame *);
3762 	}
3763 
3764 	if (ieee80211_radiotap_active_vap(vap)) {
3765 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3766 
3767 		tap->wt_flags = 0;
3768 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3769 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3770 		tap->wt_rate = rinfo->rate;
3771 		if (k != NULL)
3772 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3773 		ieee80211_radiotap_tx(vap, m);
3774 	}
3775 
3776 	flags = 0;
3777 	totlen = m->m_pkthdr.len;
3778 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3779 		flags |= IWM_TX_CMD_FLG_ACK;
3780 	}
3781 
3782 	if (type == IEEE80211_FC0_TYPE_DATA &&
3783 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3784 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3785 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3786 	}
3787 
3788 	tx->sta_id = IWM_STATION_ID;
3789 
3790 	if (type == IEEE80211_FC0_TYPE_MGT) {
3791 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3792 
3793 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3794 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3795 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3796 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3797 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3798 		} else {
3799 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3800 		}
3801 	} else {
3802 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3803 	}
3804 
3805 	if (hdrlen & 3) {
3806 		/* First segment length must be a multiple of 4. */
3807 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3808 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3809 		pad = 4 - (hdrlen & 3);
3810 	} else {
3811 		tx->offload_assist = 0;
3812 		pad = 0;
3813 	}
3814 
3815 	tx->len = htole16(totlen);
3816 	tx->tid_tspec = tid;
3817 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3818 
3819 	/* Set physical address of "scratch area". */
3820 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3821 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3822 
3823 	/* Copy 802.11 header in TX command. */
3824 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3825 
3826 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3827 
3828 	tx->sec_ctl = 0;
3829 	tx->tx_flags |= htole32(flags);
3830 
3831 	/* Trim 802.11 header. */
3832 	m_adj(m, hdrlen);
3833 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3834 	    segs, &nsegs, BUS_DMA_NOWAIT);
3835 	if (error != 0) {
3836 		if (error != EFBIG) {
3837 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3838 			    error);
3839 			m_freem(m);
3840 			return error;
3841 		}
3842 		/* Too many DMA segments, linearize mbuf. */
3843 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3844 		if (m1 == NULL) {
3845 			device_printf(sc->sc_dev,
3846 			    "%s: could not defrag mbuf\n", __func__);
3847 			m_freem(m);
3848 			return (ENOBUFS);
3849 		}
3850 		m = m1;
3851 
3852 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3853 		    segs, &nsegs, BUS_DMA_NOWAIT);
3854 		if (error != 0) {
3855 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3856 			    error);
3857 			m_freem(m);
3858 			return error;
3859 		}
3860 	}
3861 	data->m = m;
3862 	data->in = in;
3863 	data->done = 0;
3864 
3865 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3866 	    "sending txd %p, in %p\n", data, data->in);
3867 	KASSERT(data->in != NULL, ("node is NULL"));
3868 
3869 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3870 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3871 	    ring->qid, ring->cur, totlen, nsegs,
3872 	    le32toh(tx->tx_flags),
3873 	    le32toh(tx->rate_n_flags),
3874 	    tx->initial_rate_index
3875 	    );
3876 
3877 	/* Fill TX descriptor. */
3878 	memset(desc, 0, sizeof(*desc));
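	/* Two TBs for the command and header, plus one per payload segment. */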
3879 	desc->num_tbs = 2 + nsegs;
3880 
3881 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3882 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3883 	    (TB0_SIZE << 4));
3884 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3885 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3886 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3887 	    hdrlen + pad - TB0_SIZE) << 4));
3888 
3889 	/* Other DMA segments are for data payload. */
3890 	for (i = 0; i < nsegs; i++) {
3891 		seg = &segs[i];
3892 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
		desc->tbs[i + 2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
		    (seg->ds_len << 4));
3896 	}
3897 
3898 	bus_dmamap_sync(ring->data_dmat, data->map,
3899 	    BUS_DMASYNC_PREWRITE);
3900 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3901 	    BUS_DMASYNC_PREWRITE);
3902 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3903 	    BUS_DMASYNC_PREWRITE);
3904 
3905 #if 0
3906 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3907 #endif
3908 
3909 	/* Kick TX ring. */
3910 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3911 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3912 
3913 	/* Mark TX ring as full if we reach a certain threshold. */
3914 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3915 		sc->qfullmsk |= 1 << ring->qid;
3916 	}
3917 
3918 	return 0;
3919 }
3920 
3921 static int
3922 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3923     const struct ieee80211_bpf_params *params)
3924 {
3925 	struct ieee80211com *ic = ni->ni_ic;
3926 	struct iwm_softc *sc = ic->ic_softc;
3927 	int error = 0;
3928 
3929 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3930 	    "->%s begin\n", __func__);
3931 
3932 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3933 		m_freem(m);
3934 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3935 		    "<-%s not RUNNING\n", __func__);
3936 		return (ENETDOWN);
	}
3938 
3939 	IWM_LOCK(sc);
	/* XXX fix this: the bpf params are currently ignored. */
	error = iwm_tx(sc, m, ni, 0);
3946 	if (sc->sc_tx_timer == 0)
3947 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3948 	sc->sc_tx_timer = 5;
3949 	IWM_UNLOCK(sc);
3950 
	return (error);
3952 }
3953 
3954 /*
3955  * mvm/tx.c
3956  */
3957 
3958 /*
3959  * Note that there are transports that buffer frames before they reach
3960  * the firmware. This means that after flush_tx_path is called, the
3961  * queue might not be empty. The race-free way to handle this is to:
3962  * 1) set the station as draining
3963  * 2) flush the Tx path
3964  * 3) wait for the transport queues to be empty
3965  */
3966 int
3967 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3968 {
3969 	int ret;
3970 	struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
3971 		.queues_ctl = htole32(tfd_msk),
3972 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3973 	};
3974 
3975 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3976 	    sizeof(flush_cmd), &flush_cmd);
3977 	if (ret)
		device_printf(sc->sc_dev,
3979 		    "Flushing tx queue failed: %d\n", ret);
3980 	return ret;
3981 }
3982 
3983 /*
3984  * BEGIN mvm/quota.c
3985  */
3986 
3987 static int
3988 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3989 {
3990 	struct iwm_time_quota_cmd_v1 cmd;
3991 	int i, idx, ret, num_active_macs, quota, quota_rem;
3992 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3993 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3994 	uint16_t id;
3995 
3996 	memset(&cmd, 0, sizeof(cmd));
3997 
3998 	/* currently, PHY ID == binding ID */
3999 	if (ivp) {
4000 		id = ivp->phy_ctxt->id;
4001 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4002 		colors[id] = ivp->phy_ctxt->color;
4003 
		n_ifs[id] = 1;
4006 	}
4007 
4008 	/*
4009 	 * The FW's scheduling session consists of
4010 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4011 	 * equally between all the bindings that require quota
4012 	 */
4013 	num_active_macs = 0;
4014 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4015 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4016 		num_active_macs += n_ifs[i];
4017 	}
4018 
4019 	quota = 0;
4020 	quota_rem = 0;
4021 	if (num_active_macs) {
4022 		quota = IWM_MAX_QUOTA / num_active_macs;
4023 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4024 	}
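	/*
	 * E.g. with a single active MAC, that binding receives the whole
	 * session; any remainder from the division is added to the first
	 * binding below.
	 */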
4025 
4026 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4027 		if (colors[i] < 0)
4028 			continue;
4029 
4030 		cmd.quotas[idx].id_and_color =
4031 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4032 
4033 		if (n_ifs[i] <= 0) {
4034 			cmd.quotas[idx].quota = htole32(0);
4035 			cmd.quotas[idx].max_duration = htole32(0);
4036 		} else {
4037 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4038 			cmd.quotas[idx].max_duration = htole32(0);
4039 		}
4040 		idx++;
4041 	}
4042 
4043 	/* Give the remainder of the session to the first binding */
4044 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4045 
4046 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4047 	    sizeof(cmd), &cmd);
4048 	if (ret)
4049 		device_printf(sc->sc_dev,
4050 		    "%s: Failed to send quota: %d\n", __func__, ret);
4051 	return ret;
4052 }
4053 
4054 /*
4055  * END mvm/quota.c
4056  */
4057 
4058 /*
4059  * ieee80211 routines
4060  */
4061 
4062 /*
4063  * Change to AUTH state in 80211 state machine.  Roughly matches what
4064  * Linux does in bss_info_changed().
4065  */
4066 static int
4067 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4068 {
4069 	struct ieee80211_node *ni;
4070 	struct iwm_node *in;
4071 	struct iwm_vap *iv = IWM_VAP(vap);
4072 	uint32_t duration;
4073 	int error;
4074 
4075 	/*
	 * XXX I have a feeling that the vap node is being
4077 	 * freed from underneath us. Grr.
4078 	 */
4079 	ni = ieee80211_ref_node(vap->iv_bss);
4080 	in = IWM_NODE(ni);
4081 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4082 	    "%s: called; vap=%p, bss ni=%p\n",
4083 	    __func__,
4084 	    vap,
4085 	    ni);
4086 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4087 	    __func__, ether_sprintf(ni->ni_bssid));
4088 
4089 	in->in_assoc = 0;
4090 	iv->iv_auth = 1;
4091 
4092 	/*
4093 	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16.  Since we can't avoid connecting entirely, refuse the
	 * station state change; this will cause net80211 to abandon
4096 	 * attempts to connect to this AP, and eventually wpa_s will
4097 	 * blacklist the AP...
4098 	 */
4099 	if (ni->ni_intval < 16) {
4100 		device_printf(sc->sc_dev,
4101 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4102 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4103 		error = EINVAL;
4104 		goto out;
4105 	}
4106 
4107 	error = iwm_allow_mcast(vap, sc);
4108 	if (error) {
4109 		device_printf(sc->sc_dev,
4110 		    "%s: failed to set multicast\n", __func__);
4111 		goto out;
4112 	}
4113 
4114 	/*
4115 	 * This is where it deviates from what Linux does.
4116 	 *
4117 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4118 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4119 	 * and always does a mac_ctx_changed().
4120 	 *
	 * The OpenBSD port doesn't attempt to do that - it resets things
4122 	 * at odd states and does the add here.
4123 	 *
	 * So, until the state handling is fixed (i.e., we never reset
4125 	 * the NIC except for a firmware failure, which should drag
4126 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4127 	 * contexts that are required), let's do a dirty hack here.
4128 	 */
4129 	if (iv->is_uploaded) {
4130 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4131 			device_printf(sc->sc_dev,
4132 			    "%s: failed to update MAC\n", __func__);
4133 			goto out;
4134 		}
4135 	} else {
4136 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4137 			device_printf(sc->sc_dev,
4138 			    "%s: failed to add MAC\n", __func__);
4139 			goto out;
4140 		}
4141 	}
4142 	sc->sc_firmware_state = 1;
4143 
4144 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4145 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4146 		device_printf(sc->sc_dev,
4147 		    "%s: failed update phy ctxt\n", __func__);
4148 		goto out;
4149 	}
4150 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4151 
4152 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4153 		device_printf(sc->sc_dev,
4154 		    "%s: binding update cmd\n", __func__);
4155 		goto out;
4156 	}
4157 	sc->sc_firmware_state = 2;
4158 	/*
4159 	 * Authentication becomes unreliable when powersaving is left enabled
4160 	 * here. Powersaving will be activated again when association has
4161 	 * finished or is aborted.
4162 	 */
4163 	iv->ps_disabled = TRUE;
4164 	error = iwm_power_update_mac(sc);
4165 	iv->ps_disabled = FALSE;
4166 	if (error != 0) {
4167 		device_printf(sc->sc_dev,
4168 		    "%s: failed to update power management\n",
4169 		    __func__);
4170 		goto out;
4171 	}
4172 	if ((error = iwm_add_sta(sc, in)) != 0) {
4173 		device_printf(sc->sc_dev,
4174 		    "%s: failed to add sta\n", __func__);
4175 		goto out;
4176 	}
4177 	sc->sc_firmware_state = 3;
4178 
4179 	/*
4180 	 * Prevent the FW from wandering off channel during association
4181 	 * by "protecting" the session with a time event.
4182 	 */
4183 	/* XXX duration is in units of TU, not MS */
4184 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4185 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4186 
4187 	error = 0;
4188 out:
4189 	if (error != 0)
4190 		iv->iv_auth = 0;
4191 	ieee80211_free_node(ni);
4192 	return (error);
4193 }
4194 
4195 static struct ieee80211_node *
4196 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4197 {
4198 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4199 	    M_NOWAIT | M_ZERO);
4200 }
4201 
4202 static uint8_t
4203 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4204 {
4205 	uint8_t plcp = rate_n_flags & 0xff;
4206 	int i;
4207 
4208 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4209 		if (iwm_rates[i].plcp == plcp)
4210 			return iwm_rates[i].rate;
4211 	}
4212 	return 0;
4213 }
4214 
4215 uint8_t
4216 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4217 {
4218 	int i;
4219 	uint8_t rval;
4220 
4221 	for (i = 0; i < rs->rs_nrates; i++) {
4222 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4223 		if (rval == iwm_rates[ridx].rate)
4224 			return rs->rs_rates[i];
4225 	}
4226 
4227 	return 0;
4228 }
4229 
4230 static int
4231 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4232 {
4233 	int i;
4234 
4235 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4236 		if (iwm_rates[i].rate == rate)
4237 			return i;
4238 	}
4239 
4240 	device_printf(sc->sc_dev,
4241 	    "%s: WARNING: device rate for %u not found!\n",
4242 	    __func__, rate);
4243 
4244 	return -1;
4245 }
4246 
4247 
4248 static void
4249 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4250 {
4251 	struct ieee80211_node *ni = &in->in_ni;
4252 	struct iwm_lq_cmd *lq = &in->in_lq;
4253 	struct ieee80211_rateset *rs = &ni->ni_rates;
4254 	int nrates = rs->rs_nrates;
4255 	int i, ridx, tab = 0;
4256 //	int txant = 0;
4257 
4258 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4259 
4260 	if (nrates > nitems(lq->rs_table)) {
4261 		device_printf(sc->sc_dev,
4262 		    "%s: node supports %d rates, driver handles "
4263 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4264 		return;
4265 	}
4266 	if (nrates == 0) {
4267 		device_printf(sc->sc_dev,
4268 		    "%s: node supports 0 rates, odd!\n", __func__);
4269 		return;
4270 	}
4271 	nrates = imin(rix + 1, nrates);
4272 
4273 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4274 	    "%s: nrates=%d\n", __func__, nrates);
4275 
4276 	/* then construct a lq_cmd based on those */
4277 	memset(lq, 0, sizeof(*lq));
4278 	lq->sta_id = IWM_STATION_ID;
4279 
4280 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4281 	if (ni->ni_flags & IEEE80211_NODE_HT)
4282 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4283 
4284 	/*
	 * Are these used? (We don't do SISO or MIMO.)
	 * They need to be set to non-zero, though, or we get an error.
4287 	 */
4288 	lq->single_stream_ant_msk = 1;
4289 	lq->dual_stream_ant_msk = 1;
4290 
4291 	/*
4292 	 * Build the actual rate selection table.
4293 	 * The lowest bits are the rates.  Additionally,
4294 	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the TX antenna.
	 * Note that we add the rates highest rate first
4297 	 * (opposite of ni_rates).
4298 	 */
4299 	for (i = 0; i < nrates; i++) {
4300 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4301 		int nextant;
4302 
4303 		/* Map 802.11 rate to HW rate index. */
4304 		ridx = iwm_rate2ridx(sc, rate);
4305 		if (ridx == -1)
4306 			continue;
4307 
4308 #if 0
4309 		if (txant == 0)
4310 			txant = iwm_get_valid_tx_ant(sc);
4311 		nextant = 1<<(ffs(txant)-1);
4312 		txant &= ~nextant;
4313 #else
4314 		nextant = iwm_get_valid_tx_ant(sc);
4315 #endif
4316 		tab = iwm_rates[ridx].plcp;
4317 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4318 		if (IWM_RIDX_IS_CCK(ridx))
4319 			tab |= IWM_RATE_MCS_CCK_MSK;
4320 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4321 		    "station rate i=%d, rate=%d, hw=%x\n",
4322 		    i, iwm_rates[ridx].rate, tab);
4323 		lq->rs_table[i] = htole32(tab);
4324 	}
4325 	/* then fill the rest with the lowest possible rate */
4326 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4327 		KASSERT(tab != 0, ("invalid tab"));
4328 		lq->rs_table[i] = htole32(tab);
4329 	}
4330 }
4331 
4332 static void
4333 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4334 {
4335 	struct iwm_vap *ivp = IWM_VAP(vap);
4336 	int error;
4337 
4338 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4339 	sc->sc_tx_timer = 0;
4340 
4341 	ivp->iv_auth = 0;
4342 	if (sc->sc_firmware_state == 3) {
4343 		iwm_xmit_queue_drain(sc);
4344 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4345 		error = iwm_rm_sta(sc, vap, TRUE);
4346 		if (error) {
4347 			device_printf(sc->sc_dev,
4348 			    "%s: Failed to remove station: %d\n",
4349 			    __func__, error);
4350 		}
4351 	}
4352 	if (sc->sc_firmware_state == 3) {
4353 		error = iwm_mac_ctxt_changed(sc, vap);
4354 		if (error) {
4355 			device_printf(sc->sc_dev,
4356 			    "%s: Failed to change mac context: %d\n",
4357 			    __func__, error);
4358 		}
4359 	}
4360 	if (sc->sc_firmware_state == 3) {
4361 		error = iwm_sf_update(sc, vap, FALSE);
4362 		if (error) {
4363 			device_printf(sc->sc_dev,
4364 			    "%s: Failed to update smart FIFO: %d\n",
4365 			    __func__, error);
4366 		}
4367 	}
4368 	if (sc->sc_firmware_state == 3) {
4369 		error = iwm_rm_sta_id(sc, vap);
4370 		if (error) {
4371 			device_printf(sc->sc_dev,
4372 			    "%s: Failed to remove station id: %d\n",
4373 			    __func__, error);
4374 		}
4375 	}
4376 	if (sc->sc_firmware_state == 3) {
4377 		error = iwm_update_quotas(sc, NULL);
4378 		if (error) {
4379 			device_printf(sc->sc_dev,
4380 			    "%s: Failed to update PHY quota: %d\n",
4381 			    __func__, error);
4382 		}
4383 	}
4384 	if (sc->sc_firmware_state == 3) {
4385 		/* XXX Might need to specify bssid correctly. */
4386 		error = iwm_mac_ctxt_changed(sc, vap);
4387 		if (error) {
4388 			device_printf(sc->sc_dev,
4389 			    "%s: Failed to change mac context: %d\n",
4390 			    __func__, error);
4391 		}
4392 	}
4393 	if (sc->sc_firmware_state == 3) {
4394 		sc->sc_firmware_state = 2;
4395 	}
4396 	if (sc->sc_firmware_state > 1) {
4397 		error = iwm_binding_remove_vif(sc, ivp);
4398 		if (error) {
4399 			device_printf(sc->sc_dev,
4400 			    "%s: Failed to remove channel ctx: %d\n",
4401 			    __func__, error);
4402 		}
4403 	}
4404 	if (sc->sc_firmware_state > 1) {
4405 		sc->sc_firmware_state = 1;
4406 	}
4407 	ivp->phy_ctxt = NULL;
4408 	if (sc->sc_firmware_state > 0) {
4409 		error = iwm_mac_ctxt_changed(sc, vap);
4410 		if (error) {
4411 			device_printf(sc->sc_dev,
4412 			    "%s: Failed to change mac context: %d\n",
4413 			    __func__, error);
4414 		}
4415 	}
4416 	if (sc->sc_firmware_state > 0) {
4417 		error = iwm_power_update_mac(sc);
4418 		if (error != 0) {
4419 			device_printf(sc->sc_dev,
4420 			    "%s: failed to update power management\n",
4421 			    __func__);
4422 		}
4423 	}
4424 	sc->sc_firmware_state = 0;
4425 }
4426 
4427 static int
4428 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4429 {
4430 	struct iwm_vap *ivp = IWM_VAP(vap);
4431 	struct ieee80211com *ic = vap->iv_ic;
4432 	struct iwm_softc *sc = ic->ic_softc;
4433 	struct iwm_node *in;
4434 	int error;
4435 
4436 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4437 	    "switching state %s -> %s arg=0x%x\n",
4438 	    ieee80211_state_name[vap->iv_state],
4439 	    ieee80211_state_name[nstate],
4440 	    arg);
4441 
4442 	IEEE80211_UNLOCK(ic);
4443 	IWM_LOCK(sc);
4444 
4445 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4446 	    (nstate == IEEE80211_S_AUTH ||
4447 	     nstate == IEEE80211_S_ASSOC ||
4448 	     nstate == IEEE80211_S_RUN)) {
		/* Stop the scan LED blinking when we begin authenticating. */
4450 		iwm_led_blink_stop(sc);
4451 	}
4452 
4453 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4454 		iwm_led_disable(sc);
4455 		/* disable beacon filtering if we're hopping out of RUN */
4456 		iwm_disable_beacon_filter(sc);
4457 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4458 			in->in_assoc = 0;
4459 	}
4460 
4461 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4462 	     vap->iv_state == IEEE80211_S_ASSOC ||
4463 	     vap->iv_state == IEEE80211_S_RUN) &&
4464 	    (nstate == IEEE80211_S_INIT ||
4465 	     nstate == IEEE80211_S_SCAN ||
4466 	     nstate == IEEE80211_S_AUTH)) {
4467 		iwm_stop_session_protection(sc, ivp);
4468 	}
4469 
4470 	if ((vap->iv_state == IEEE80211_S_RUN ||
4471 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4472 	    nstate == IEEE80211_S_INIT) {
4473 		/*
4474 		 * In this case, iv_newstate() wants to send an 80211 frame on
		 * the network that we are leaving.  So we need to call it
		 * before tearing down all the firmware state.
4477 		 */
4478 		IWM_UNLOCK(sc);
4479 		IEEE80211_LOCK(ic);
4480 		ivp->iv_newstate(vap, nstate, arg);
4481 		IEEE80211_UNLOCK(ic);
4482 		IWM_LOCK(sc);
4483 		iwm_bring_down_firmware(sc, vap);
4484 		IWM_UNLOCK(sc);
4485 		IEEE80211_LOCK(ic);
4486 		return 0;
4487 	}
4488 
4489 	switch (nstate) {
4490 	case IEEE80211_S_INIT:
4491 	case IEEE80211_S_SCAN:
4492 		break;
4493 
4494 	case IEEE80211_S_AUTH:
4495 		iwm_bring_down_firmware(sc, vap);
4496 		if ((error = iwm_auth(vap, sc)) != 0) {
4497 			device_printf(sc->sc_dev,
4498 			    "%s: could not move to auth state: %d\n",
4499 			    __func__, error);
4500 			iwm_bring_down_firmware(sc, vap);
4501 			IWM_UNLOCK(sc);
4502 			IEEE80211_LOCK(ic);
4503 			return 1;
4504 		}
4505 		break;
4506 
4507 	case IEEE80211_S_ASSOC:
4508 		/*
4509 		 * EBS may be disabled due to previous failures reported by FW.
4510 		 * Reset EBS status here assuming environment has been changed.
4511 		 */
4512 		sc->last_ebs_successful = TRUE;
4513 		break;
4514 
4515 	case IEEE80211_S_RUN:
4516 		in = IWM_NODE(vap->iv_bss);
		/*
		 * Update the association state, now that we have it all
		 * (e.g., the associd comes in at this point).
		 */
4519 		error = iwm_update_sta(sc, in);
4520 		if (error != 0) {
4521 			device_printf(sc->sc_dev,
4522 			    "%s: failed to update STA\n", __func__);
4523 			IWM_UNLOCK(sc);
4524 			IEEE80211_LOCK(ic);
4525 			return error;
4526 		}
4527 		in->in_assoc = 1;
4528 		error = iwm_mac_ctxt_changed(sc, vap);
4529 		if (error != 0) {
4530 			device_printf(sc->sc_dev,
4531 			    "%s: failed to update MAC: %d\n", __func__, error);
4532 		}
4533 
4534 		iwm_sf_update(sc, vap, FALSE);
4535 		iwm_enable_beacon_filter(sc, ivp);
4536 		iwm_power_update_mac(sc);
4537 		iwm_update_quotas(sc, ivp);
4538 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4539 		iwm_setrates(sc, in, rix);
4540 
4541 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4542 			device_printf(sc->sc_dev,
4543 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4544 		}
4545 
4546 		iwm_led_enable(sc);
4547 		break;
4548 
4549 	default:
4550 		break;
4551 	}
4552 	IWM_UNLOCK(sc);
4553 	IEEE80211_LOCK(ic);
4554 
4555 	return (ivp->iv_newstate(vap, nstate, arg));
4556 }
4557 
4558 void
4559 iwm_endscan_cb(void *arg, int pending)
4560 {
4561 	struct iwm_softc *sc = arg;
4562 	struct ieee80211com *ic = &sc->sc_ic;
4563 
4564 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4565 	    "%s: scan ended\n",
4566 	    __func__);
4567 
4568 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4569 }
4570 
4571 static int
4572 iwm_send_bt_init_conf(struct iwm_softc *sc)
4573 {
4574 	struct iwm_bt_coex_cmd bt_cmd;
4575 
4576 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4577 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4578 
4579 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4580 	    &bt_cmd);
4581 }
4582 
4583 static boolean_t
4584 iwm_is_lar_supported(struct iwm_softc *sc)
4585 {
4586 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4587 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4588 
4589 	if (iwm_lar_disable)
4590 		return FALSE;
4591 
4592 	/*
4593 	 * Enable LAR only if it is supported by the FW (TLV) &&
4594 	 * enabled in the NVM
4595 	 */
4596 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4597 		return nvm_lar && tlv_lar;
4598 	else
4599 		return tlv_lar;
4600 }
4601 
4602 static boolean_t
4603 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4604 {
4605 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4606 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4607 }
4608 
4609 static int
4610 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4611 {
4612 	struct iwm_mcc_update_cmd mcc_cmd;
4613 	struct iwm_host_cmd hcmd = {
4614 		.id = IWM_MCC_UPDATE_CMD,
4615 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4616 		.data = { &mcc_cmd },
4617 	};
4618 	int ret;
4619 #ifdef IWM_DEBUG
4620 	struct iwm_rx_packet *pkt;
4621 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4622 	struct iwm_mcc_update_resp_v2 *mcc_resp;
4623 	int n_channels;
4624 	uint16_t mcc;
4625 #endif
4626 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4627 
4628 	if (!iwm_is_lar_supported(sc)) {
4629 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4630 		    __func__);
4631 		return 0;
4632 	}
4633 
4634 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4635 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4636 	if (iwm_is_wifi_mcc_supported(sc))
4637 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4638 	else
4639 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4640 
4641 	if (resp_v2)
4642 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4643 	else
4644 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4645 
4646 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4647 	    "send MCC update to FW with '%c%c' src = %d\n",
4648 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4649 
4650 	ret = iwm_send_cmd(sc, &hcmd);
4651 	if (ret)
4652 		return ret;
4653 
4654 #ifdef IWM_DEBUG
4655 	pkt = hcmd.resp_pkt;
4656 
4657 	/* Extract MCC response */
4658 	if (resp_v2) {
4659 		mcc_resp = (void *)pkt->data;
4660 		mcc = mcc_resp->mcc;
4661 		n_channels =  le32toh(mcc_resp->n_channels);
4662 	} else {
4663 		mcc_resp_v1 = (void *)pkt->data;
4664 		mcc = mcc_resp_v1->mcc;
4665 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4666 	}
4667 
4668 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4669 	if (mcc == 0)
4670 		mcc = 0x3030;  /* "00" - world */
4671 
4672 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4673 	    "regulatory domain '%c%c' (%d channels available)\n",
4674 	    mcc >> 8, mcc & 0xff, n_channels);
4675 #endif
4676 	iwm_free_resp(sc, &hcmd);
4677 
4678 	return 0;
4679 }
4680 
4681 static void
4682 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4683 {
4684 	struct iwm_host_cmd cmd = {
4685 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4686 		.len = { sizeof(uint32_t), },
4687 		.data = { &backoff, },
4688 	};
4689 
4690 	if (iwm_send_cmd(sc, &cmd) != 0) {
4691 		device_printf(sc->sc_dev,
4692 		    "failed to change thermal tx backoff\n");
4693 	}
4694 }
4695 
4696 static int
4697 iwm_init_hw(struct iwm_softc *sc)
4698 {
4699 	struct ieee80211com *ic = &sc->sc_ic;
4700 	int error, i, ac;
4701 
4702 	sc->sf_state = IWM_SF_UNINIT;
4703 
4704 	if ((error = iwm_start_hw(sc)) != 0) {
4705 		printf("iwm_start_hw: failed %d\n", error);
4706 		return error;
4707 	}
4708 
4709 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4710 		printf("iwm_run_init_ucode: failed %d\n", error);
4711 		return error;
4712 	}
4713 
4714 	/*
	 * We should stop and restart the HW, since the INIT
	 * image has just been loaded.
4717 	 */
4718 	iwm_stop_device(sc);
4719 	sc->sc_ps_disabled = FALSE;
4720 	if ((error = iwm_start_hw(sc)) != 0) {
4721 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4722 		return error;
4723 	}
4724 
	/* Restart, this time with the regular firmware. */
4726 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4727 	if (error) {
4728 		device_printf(sc->sc_dev, "could not load firmware\n");
4729 		goto error;
4730 	}
4731 
4732 	error = iwm_sf_update(sc, NULL, FALSE);
4733 	if (error)
4734 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4735 
4736 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4737 		device_printf(sc->sc_dev, "bt init conf failed\n");
4738 		goto error;
4739 	}
4740 
4741 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4742 	if (error != 0) {
4743 		device_printf(sc->sc_dev, "antenna config failed\n");
4744 		goto error;
4745 	}
4746 
4747 	/* Send phy db control command and then phy db calibration */
4748 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4749 		goto error;
4750 
4751 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4752 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4753 		goto error;
4754 	}
4755 
4756 	/* Add auxiliary station for scanning */
4757 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4758 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4759 		goto error;
4760 	}
4761 
4762 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4763 		/*
4764 		 * The channel used here isn't relevant as it's
4765 		 * going to be overwritten in the other flows.
4766 		 * For now use the first channel we have.
4767 		 */
4768 		if ((error = iwm_phy_ctxt_add(sc,
4769 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4770 			goto error;
4771 	}
4772 
4773 	/* Initialize tx backoffs to the minimum. */
4774 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4775 		iwm_tt_tx_backoff(sc, 0);
4776 
4777 	if (iwm_config_ltr(sc) != 0)
4778 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4779 
4780 	error = iwm_power_update_device(sc);
4781 	if (error)
4782 		goto error;
4783 
4784 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4785 		goto error;
4786 
4787 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4788 		if ((error = iwm_config_umac_scan(sc)) != 0)
4789 			goto error;
4790 	}
4791 
4792 	/* Enable Tx queues. */
4793 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4794 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4795 		    iwm_ac_to_tx_fifo[ac]);
4796 		if (error)
4797 			goto error;
4798 	}
4799 
4800 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4801 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4802 		goto error;
4803 	}
4804 
4805 	return 0;
4806 
4807  error:
4808 	iwm_stop_device(sc);
4809 	return error;
4810 }
4811 
4812 /* Allow multicast from our BSSID. */
4813 static int
4814 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4815 {
4816 	struct ieee80211_node *ni = vap->iv_bss;
4817 	struct iwm_mcast_filter_cmd *cmd;
4818 	size_t size;
4819 	int error;
4820 
4821 	size = roundup(sizeof(*cmd), 4);
4822 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4823 	if (cmd == NULL)
4824 		return ENOMEM;
4825 	cmd->filter_own = 1;
4826 	cmd->port_id = 0;
4827 	cmd->count = 0;
4828 	cmd->pass_all = 1;
4829 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4830 
4831 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4832 	    IWM_CMD_SYNC, size, cmd);
4833 	free(cmd, M_DEVBUF);
4834 
4835 	return (error);
4836 }
4837 
4838 /*
4839  * ifnet interfaces
4840  */
4841 
4842 static void
4843 iwm_init(struct iwm_softc *sc)
4844 {
4845 	int error;
4846 
4847 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4848 		return;
4849 	}
4850 	sc->sc_generation++;
4851 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4852 
4853 	if ((error = iwm_init_hw(sc)) != 0) {
4854 		printf("iwm_init_hw failed %d\n", error);
4855 		iwm_stop(sc);
4856 		return;
4857 	}
4858 
4859 	/*
4860 	 * Ok, firmware loaded and we are jogging
4861 	 */
4862 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4863 }
4864 
4865 static int
4866 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4867 {
4868 	struct iwm_softc *sc;
4869 	int error;
4870 
4871 	sc = ic->ic_softc;
4872 
4873 	IWM_LOCK(sc);
4874 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4875 		IWM_UNLOCK(sc);
4876 		return (ENXIO);
4877 	}
4878 	error = mbufq_enqueue(&sc->sc_snd, m);
4879 	if (error) {
4880 		IWM_UNLOCK(sc);
4881 		return (error);
4882 	}
4883 	iwm_start(sc);
4884 	IWM_UNLOCK(sc);
4885 	return (0);
4886 }
4887 
4888 /*
4889  * Dequeue packets from sendq and call send.
4890  */
4891 static void
4892 iwm_start(struct iwm_softc *sc)
4893 {
4894 	struct ieee80211_node *ni;
4895 	struct mbuf *m;
4896 	int ac = 0;
4897 
4898 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4899 	while (sc->qfullmsk == 0 &&
4900 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4901 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4902 		if (iwm_tx(sc, m, ni, ac) != 0) {
4903 			if_inc_counter(ni->ni_vap->iv_ifp,
4904 			    IFCOUNTER_OERRORS, 1);
4905 			ieee80211_free_node(ni);
4906 			continue;
4907 		}
4908 		if (sc->sc_tx_timer == 0) {
4909 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4910 			    sc);
4911 		}
4912 		sc->sc_tx_timer = 15;
4913 	}
4914 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4915 }
4916 
4917 static void
4918 iwm_stop(struct iwm_softc *sc)
4919 {
4920 
4921 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4922 	sc->sc_flags |= IWM_FLAG_STOPPED;
4923 	sc->sc_generation++;
4924 	iwm_led_blink_stop(sc);
4925 	sc->sc_tx_timer = 0;
4926 	iwm_stop_device(sc);
4927 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4928 }
4929 
4930 static void
4931 iwm_watchdog(void *arg)
4932 {
4933 	struct iwm_softc *sc = arg;
4934 	struct ieee80211com *ic = &sc->sc_ic;
4935 
4936 	if (sc->sc_attached == 0)
4937 		return;
4938 
4939 	if (sc->sc_tx_timer > 0) {
4940 		if (--sc->sc_tx_timer == 0) {
4941 			device_printf(sc->sc_dev, "device timeout\n");
4942 #ifdef IWM_DEBUG
4943 			iwm_nic_error(sc);
4944 #endif
4945 			ieee80211_restart_all(ic);
4946 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4947 			return;
4948 		}
4949 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4950 	}
4951 }
4952 
4953 static void
4954 iwm_parent(struct ieee80211com *ic)
4955 {
4956 	struct iwm_softc *sc = ic->ic_softc;
4957 	int startall = 0;
4958 	int rfkill = 0;
4959 
4960 	IWM_LOCK(sc);
4961 	if (ic->ic_nrunning > 0) {
4962 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4963 			iwm_init(sc);
4964 			rfkill = iwm_check_rfkill(sc);
4965 			if (!rfkill)
4966 				startall = 1;
4967 		}
4968 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4969 		iwm_stop(sc);
4970 	IWM_UNLOCK(sc);
4971 	if (startall)
4972 		ieee80211_start_all(ic);
4973 	else if (rfkill)
4974 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
4975 }
4976 
4977 static void
4978 iwm_rftoggle_task(void *arg, int npending __unused)
4979 {
4980 	struct iwm_softc *sc = arg;
4981 	struct ieee80211com *ic = &sc->sc_ic;
4982 	int rfkill;
4983 
4984 	IWM_LOCK(sc);
4985 	rfkill = iwm_check_rfkill(sc);
4986 	IWM_UNLOCK(sc);
4987 	if (rfkill) {
4988 		device_printf(sc->sc_dev,
4989 		    "%s: rfkill switch, disabling interface\n", __func__);
4990 		ieee80211_suspend_all(ic);
4991 		ieee80211_notify_radio(ic, 0);
4992 	} else {
4993 		device_printf(sc->sc_dev,
4994 		    "%s: rfkill cleared, re-enabling interface\n", __func__);
4995 		ieee80211_resume_all(ic);
4996 		ieee80211_notify_radio(ic, 1);
4997 	}
4998 }
4999 
5000 /*
5001  * The interrupt side of things
5002  */
5003 
5004 /*
5005  * error dumping routines are from iwlwifi/mvm/utils.c
5006  */
5007 
5008 /*
5009  * Note: This structure is read from the device with IO accesses,
5010  * and the reading already does the endian conversion. As it is
5011  * read with uint32_t-sized accesses, any members with a different size
5012  * need to be ordered correctly though!
5013  */
5014 struct iwm_error_event_table {
5015 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5016 	uint32_t error_id;		/* type of error */
5017 	uint32_t trm_hw_status0;	/* TRM HW status */
5018 	uint32_t trm_hw_status1;	/* TRM HW status */
5019 	uint32_t blink2;		/* branch link */
5020 	uint32_t ilink1;		/* interrupt link */
5021 	uint32_t ilink2;		/* interrupt link */
5022 	uint32_t data1;		/* error-specific data */
5023 	uint32_t data2;		/* error-specific data */
5024 	uint32_t data3;		/* error-specific data */
5025 	uint32_t bcon_time;		/* beacon timer */
5026 	uint32_t tsf_low;		/* network timestamp function timer */
5027 	uint32_t tsf_hi;		/* network timestamp function timer */
5028 	uint32_t gp1;		/* GP1 timer register */
5029 	uint32_t gp2;		/* GP2 timer register */
5030 	uint32_t fw_rev_type;	/* firmware revision type */
5031 	uint32_t major;		/* uCode version major */
5032 	uint32_t minor;		/* uCode version minor */
5033 	uint32_t hw_ver;		/* HW Silicon version */
5034 	uint32_t brd_ver;		/* HW board version */
5035 	uint32_t log_pc;		/* log program counter */
5036 	uint32_t frame_ptr;		/* frame pointer */
5037 	uint32_t stack_ptr;		/* stack pointer */
5038 	uint32_t hcmd;		/* last host command header */
5039 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5040 				 * rxtx_flag */
5041 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5042 				 * host_flag */
5043 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5044 				 * enc_flag */
5045 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5046 				 * time_flag */
5047 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5048 				 * wico interrupt */
5049 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5050 	uint32_t wait_event;		/* wait event() caller address */
5051 	uint32_t l2p_control;	/* L2pControlField */
5052 	uint32_t l2p_duration;	/* L2pDurationField */
5053 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5054 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicates the date and time of the
				 * compilation */
5059 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5060 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5061 
5062 /*
5063  * UMAC error struct - relevant starting from family 8000 chip.
5064  * Note: This structure is read from the device with IO accesses,
5065  * and the reading already does the endian conversion. As it is
5066  * read with u32-sized accesses, any members with a different size
5067  * need to be ordered correctly though!
5068  */
5069 struct iwm_umac_error_event_table {
5070 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5071 	uint32_t error_id;	/* type of error */
5072 	uint32_t blink1;	/* branch link */
5073 	uint32_t blink2;	/* branch link */
5074 	uint32_t ilink1;	/* interrupt link */
5075 	uint32_t ilink2;	/* interrupt link */
5076 	uint32_t data1;		/* error-specific data */
5077 	uint32_t data2;		/* error-specific data */
5078 	uint32_t data3;		/* error-specific data */
5079 	uint32_t umac_major;
5080 	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27 */
5082 	uint32_t stack_pointer;	/* core register 28 */
5083 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5084 	uint32_t nic_isr_pref;	/* ISR status register */
5085 } __packed;
5086 
5087 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5088 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5089 
5090 #ifdef IWM_DEBUG
5091 struct {
5092 	const char *name;
5093 	uint8_t num;
5094 } advanced_lookup[] = {
5095 	{ "NMI_INTERRUPT_WDG", 0x34 },
5096 	{ "SYSASSERT", 0x35 },
5097 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5098 	{ "BAD_COMMAND", 0x38 },
5099 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5100 	{ "FATAL_ERROR", 0x3D },
5101 	{ "NMI_TRM_HW_ERR", 0x46 },
5102 	{ "NMI_INTERRUPT_TRM", 0x4C },
5103 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5104 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5105 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5106 	{ "NMI_INTERRUPT_HOST", 0x66 },
5107 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5108 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5109 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5110 	{ "ADVANCED_SYSASSERT", 0 },
5111 };
5112 
5113 static const char *
5114 iwm_desc_lookup(uint32_t num)
5115 {
5116 	int i;
5117 
5118 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5119 		if (advanced_lookup[i].num == num)
5120 			return advanced_lookup[i].name;
5121 
5122 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5123 	return advanced_lookup[i].name;
5124 }
5125 
5126 static void
5127 iwm_nic_umac_error(struct iwm_softc *sc)
5128 {
5129 	struct iwm_umac_error_event_table table;
5130 	uint32_t base;
5131 
5132 	base = sc->umac_error_event_table;
5133 
5134 	if (base < 0x800000) {
5135 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5136 		    base);
5137 		return;
5138 	}
5139 
5140 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5141 		device_printf(sc->sc_dev, "reading errlog failed\n");
5142 		return;
5143 	}
5144 
5145 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5146 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5147 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5148 		    sc->sc_flags, table.valid);
5149 	}
5150 
5151 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5152 		iwm_desc_lookup(table.error_id));
5153 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5154 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5155 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5156 	    table.ilink1);
5157 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5158 	    table.ilink2);
5159 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5160 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5161 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5162 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5163 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5164 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5165 	    table.frame_pointer);
5166 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5167 	    table.stack_pointer);
5168 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5169 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5170 	    table.nic_isr_pref);
5171 }
5172 
5173 /*
5174  * Support for dumping the error log seemed like a good idea ...
5175  * but it's mostly hex junk and the only sensible thing is the
5176  * hw/ucode revision (which we know anyway).  Since it's here,
5177  * I'll just leave it in, just in case e.g. the Intel guys want to
5178  * help us decipher some "ADVANCED_SYSASSERT" later.
5179  */
5180 static void
5181 iwm_nic_error(struct iwm_softc *sc)
5182 {
5183 	struct iwm_error_event_table table;
5184 	uint32_t base;
5185 
5186 	device_printf(sc->sc_dev, "dumping device error log\n");
5187 	base = sc->error_event_table[0];
5188 	if (base < 0x800000) {
5189 		device_printf(sc->sc_dev,
5190 		    "Invalid error log pointer 0x%08x\n", base);
5191 		return;
5192 	}
5193 
5194 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5195 		device_printf(sc->sc_dev, "reading errlog failed\n");
5196 		return;
5197 	}
5198 
5199 	if (!table.valid) {
5200 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5201 		return;
5202 	}
5203 
5204 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5205 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5206 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5207 		    sc->sc_flags, table.valid);
5208 	}
5209 
5210 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5211 	    iwm_desc_lookup(table.error_id));
5212 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5213 	    table.trm_hw_status0);
5214 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5215 	    table.trm_hw_status1);
5216 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5217 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5218 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5219 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5220 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5221 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5222 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5223 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5224 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5225 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5226 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5227 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5228 	    table.fw_rev_type);
5229 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5230 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5231 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5232 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5233 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5234 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5235 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5236 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5237 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5238 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5239 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5240 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5241 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5242 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5243 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5244 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5245 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5246 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5247 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5248 
5249 	if (sc->umac_error_event_table)
5250 		iwm_nic_umac_error(sc);
5251 }
5252 #endif
5253 
5254 static void
5255 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5256 {
5257 	struct ieee80211com *ic = &sc->sc_ic;
5258 	struct iwm_cmd_response *cresp;
5259 	struct mbuf *m1;
5260 	uint32_t offset = 0;
5261 	uint32_t maxoff = IWM_RBUF_SIZE;
5262 	uint32_t nextoff;
5263 	boolean_t stolen = FALSE;
5264 
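/* True while one more status word plus command header fits below maxoff. */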
5265 #define HAVEROOM(a)	\
5266     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5267 
5268 	while (HAVEROOM(offset)) {
5269 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5270 		    offset);
5271 		int qid, idx, code, len;
5272 
5273 		qid = pkt->hdr.qid;
5274 		idx = pkt->hdr.idx;
5275 
5276 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5277 
		/*
		 * We randomly get these from the firmware, no idea why.
		 * They at least seem harmless, so just ignore them for now.
		 */
5282 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5283 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5284 			break;
5285 		}
5286 
5287 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5288 		    "rx packet qid=%d idx=%d type=%x\n",
5289 		    qid & ~0x80, pkt->hdr.idx, code);
5290 
5291 		len = iwm_rx_packet_len(pkt);
5292 		len += sizeof(uint32_t); /* account for status word */
5293 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5294 
5295 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5296 
5297 		switch (code) {
5298 		case IWM_REPLY_RX_PHY_CMD:
5299 			iwm_rx_rx_phy_cmd(sc, pkt);
5300 			break;
5301 
5302 		case IWM_REPLY_RX_MPDU_CMD: {
5303 			/*
5304 			 * If this is the last frame in the RX buffer, we
5305 			 * can directly feed the mbuf to the sharks here.
5306 			 */
5307 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5308 			    struct iwm_rx_packet *, nextoff);
5309 			if (!HAVEROOM(nextoff) ||
5310 			    (nextpkt->hdr.code == 0 &&
5311 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5312 			     nextpkt->hdr.idx == 0) ||
5313 			    (nextpkt->len_n_flags ==
5314 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5315 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5316 					stolen = FALSE;
5317 					/* Make sure we abort the loop */
5318 					nextoff = maxoff;
5319 				}
5320 				break;
5321 			}
5322 
5323 			/*
5324 			 * Use m_copym instead of m_split, because that
5325 			 * makes it easier to keep a valid rx buffer in
5326 			 * the ring, when iwm_rx_mpdu() fails.
5327 			 *
5328 			 * We need to start m_copym() at offset 0, to get the
5329 			 * M_PKTHDR flag preserved.
5330 			 */
5331 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5332 			if (m1) {
5333 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5334 					stolen = TRUE;
5335 				else
5336 					m_freem(m1);
5337 			}
5338 			break;
5339 		}
5340 
5341 		case IWM_TX_CMD:
5342 			iwm_rx_tx_cmd(sc, pkt);
5343 			break;
5344 
5345 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5346 			struct iwm_missed_beacons_notif *resp;
5347 			int missed;
5348 
5349 			/* XXX look at mac_id to determine interface ID */
5350 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5351 
5352 			resp = (void *)pkt->data;
5353 			missed = le32toh(resp->consec_missed_beacons);
5354 
5355 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5356 			    "%s: MISSED_BEACON: mac_id=%d, "
5357 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5358 			    "num_rx=%d\n",
5359 			    __func__,
5360 			    le32toh(resp->mac_id),
5361 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5362 			    le32toh(resp->consec_missed_beacons),
5363 			    le32toh(resp->num_expected_beacons),
5364 			    le32toh(resp->num_recvd_beacons));
5365 
5366 			/* Be paranoid */
5367 			if (vap == NULL)
5368 				break;
5369 
5370 			/* XXX no net80211 locking? */
5371 			if (vap->iv_state == IEEE80211_S_RUN &&
5372 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5373 				if (missed > vap->iv_bmissthreshold) {
5374 					/* XXX bad locking; turn into task */
5375 					IWM_UNLOCK(sc);
5376 					ieee80211_beacon_miss(ic);
5377 					IWM_LOCK(sc);
5378 				}
5379 			}
5380 
5381 			break;
5382 		}
5383 
5384 		case IWM_MFUART_LOAD_NOTIFICATION:
5385 			break;
5386 
5387 		case IWM_ALIVE:
5388 			break;
5389 
5390 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5391 			break;
5392 
5393 		case IWM_STATISTICS_NOTIFICATION:
5394 			iwm_handle_rx_statistics(sc, pkt);
5395 			break;
5396 
5397 		case IWM_NVM_ACCESS_CMD:
5398 		case IWM_MCC_UPDATE_CMD:
5399 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5400 				memcpy(sc->sc_cmd_resp,
5401 				    pkt, sizeof(sc->sc_cmd_resp));
5402 			}
5403 			break;
5404 
5405 		case IWM_MCC_CHUB_UPDATE_CMD: {
5406 			struct iwm_mcc_chub_notif *notif;
5407 			notif = (void *)pkt->data;
5408 
			sc->sc_fw_mcc[0] = (le16toh(notif->mcc) & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = le16toh(notif->mcc) & 0xff;
5411 			sc->sc_fw_mcc[2] = '\0';
5412 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5413 			    "fw source %d sent CC '%s'\n",
5414 			    notif->source_id, sc->sc_fw_mcc);
5415 			break;
5416 		}
5417 
5418 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5419 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5420 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5421 			struct iwm_dts_measurement_notif_v1 *notif;
5422 
5423 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5424 				device_printf(sc->sc_dev,
5425 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5426 				break;
5427 			}
5428 			notif = (void *)pkt->data;
5429 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5430 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5431 			    notif->temp);
5432 			break;
5433 		}
5434 
5435 		case IWM_PHY_CONFIGURATION_CMD:
5436 		case IWM_TX_ANT_CONFIGURATION_CMD:
5437 		case IWM_ADD_STA:
5438 		case IWM_MAC_CONTEXT_CMD:
5439 		case IWM_REPLY_SF_CFG_CMD:
5440 		case IWM_POWER_TABLE_CMD:
5441 		case IWM_LTR_CONFIG:
5442 		case IWM_PHY_CONTEXT_CMD:
5443 		case IWM_BINDING_CONTEXT_CMD:
5444 		case IWM_TIME_EVENT_CMD:
5445 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5446 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5447 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5448 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5449 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5450 		case IWM_REPLY_BEACON_FILTERING_CMD:
5451 		case IWM_MAC_PM_POWER_TABLE:
5452 		case IWM_TIME_QUOTA_CMD:
5453 		case IWM_REMOVE_STA:
5454 		case IWM_TXPATH_FLUSH:
5455 		case IWM_LQ_CMD:
5456 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5457 				 IWM_FW_PAGING_BLOCK_CMD):
5458 		case IWM_BT_CONFIG:
5459 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5460 			cresp = (void *)pkt->data;
5461 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5462 				memcpy(sc->sc_cmd_resp,
5463 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5464 			}
5465 			break;
5466 
5467 		/* ignore */
5468 		case IWM_PHY_DB_CMD:
5469 			break;
5470 
5471 		case IWM_INIT_COMPLETE_NOTIF:
5472 			break;
5473 
5474 		case IWM_SCAN_OFFLOAD_COMPLETE:
5475 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5476 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5477 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5478 				ieee80211_runtask(ic, &sc->sc_es_task);
5479 			}
5480 			break;
5481 
		case IWM_SCAN_ITERATION_COMPLETE:
			/* The notification payload is currently unused. */
			break;
5487 
5488 		case IWM_SCAN_COMPLETE_UMAC:
5489 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5490 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5491 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5492 				ieee80211_runtask(ic, &sc->sc_es_task);
5493 			}
5494 			break;
5495 
5496 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5497 			struct iwm_umac_scan_iter_complete_notif *notif;
5498 			notif = (void *)pkt->data;
5499 
5500 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5501 			    "complete, status=0x%x, %d channels scanned\n",
5502 			    notif->status, notif->scanned_channels);
5503 			break;
5504 		}
5505 
5506 		case IWM_REPLY_ERROR: {
5507 			struct iwm_error_resp *resp;
5508 			resp = (void *)pkt->data;
5509 
5510 			device_printf(sc->sc_dev,
5511 			    "firmware error 0x%x, cmd 0x%x\n",
5512 			    le32toh(resp->error_type),
5513 			    resp->cmd_id);
5514 			break;
5515 		}
5516 
5517 		case IWM_TIME_EVENT_NOTIFICATION:
5518 			iwm_rx_time_event_notif(sc, pkt);
5519 			break;
5520 
5521 		/*
5522 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5523 		 * messages. Just ignore them for now.
5524 		 */
5525 		case IWM_DEBUG_LOG_MSG:
5526 			break;
5527 
5528 		case IWM_MCAST_FILTER_CMD:
5529 			break;
5530 
5531 		case IWM_SCD_QUEUE_CFG: {
5532 			struct iwm_scd_txq_cfg_rsp *rsp;
5533 			rsp = (void *)pkt->data;
5534 
5535 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5536 			    "queue cfg token=0x%x sta_id=%d "
5537 			    "tid=%d scd_queue=%d\n",
5538 			    rsp->token, rsp->sta_id, rsp->tid,
5539 			    rsp->scd_queue);
5540 			break;
5541 		}
5542 
5543 		default:
5544 			device_printf(sc->sc_dev,
5545 			    "code %x, frame %d/%d %x unhandled\n",
5546 			    code, qid & ~0x80, idx, pkt->len_n_flags);
5547 			break;
5548 		}
5549 
		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte sequence field,
		 * so Linux's bit 15 is bit 7 of our qid byte.
		 */
5564 		if (!(qid & (1 << 7)))
5565 			iwm_cmd_done(sc, pkt);
5566 
5567 		offset = nextoff;
5568 	}
5569 	if (stolen)
5570 		m_freem(m);
5571 #undef HAVEROOM
5572 }
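
/*
 * For illustration, a minimal stand-alone sketch of the offset walk in
 * iwm_handle_rxb() above: packets are packed back to back in one RX
 * buffer, each padded out to IWM_FH_RSCSR_FRAME_ALIGN (0x40) so that
 * the next header starts aligned.  The payload lengths here are made
 * up; only the arithmetic mirrors the driver.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define FRAME_ALIGN	0x40		/* IWM_FH_RSCSR_FRAME_ALIGN */
#define RBUF_SIZE	4096		/* IWM_RBUF_SIZE */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	unsigned int lens[] = { 52, 1500, 24 };	/* hypothetical payloads */
	unsigned int offset = 0, i;

	for (i = 0; i < 3 && offset < RBUF_SIZE; i++) {
		/* Account for the 4-byte status word, as the driver does. */
		unsigned int len = lens[i] + (unsigned int)sizeof(uint32_t);

		printf("packet %u at offset 0x%x, len %u\n", i, offset, len);
		offset += roundup2(len, FRAME_ALIGN);
	}
	return (0);
}
#endif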
5573 
5574 /*
5575  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5576  * Basic structure from if_iwn
5577  */
5578 static void
5579 iwm_notif_intr(struct iwm_softc *sc)
5580 {
5581 	int count;
5582 	uint32_t wreg;
5583 	uint16_t hw;
5584 
5585 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5586 	    BUS_DMASYNC_POSTREAD);
5587 
5588 	if (sc->cfg->mqrx_supported) {
5589 		count = IWM_RX_MQ_RING_COUNT;
5590 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5591 	} else {
5592 		count = IWM_RX_LEGACY_RING_COUNT;
5593 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5594 	}
5595 
5596 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5597 
5598 	/*
5599 	 * Process responses
5600 	 */
5601 	while (sc->rxq.cur != hw) {
5602 		struct iwm_rx_ring *ring = &sc->rxq;
5603 		struct iwm_rx_data *data = &ring->data[ring->cur];
5604 
5605 		bus_dmamap_sync(ring->data_dmat, data->map,
5606 		    BUS_DMASYNC_POSTREAD);
5607 
5608 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5609 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5610 		iwm_handle_rxb(sc, data->m);
5611 
5612 		ring->cur = (ring->cur + 1) % count;
5613 	}
5614 
	/*
	 * Tell the firmware that it can reuse the ring entries that
	 * we have just processed.
	 * The hardware seems to get upset unless the write pointer is
	 * aligned to a multiple of 8; e.g. for hw == 37 we step back
	 * to 36 and then write rounddown2(36, 8) == 32.
	 */
5621 	hw = (hw == 0) ? count - 1 : hw - 1;
5622 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5623 }
5624 
5625 static void
5626 iwm_intr(void *arg)
5627 {
5628 	struct iwm_softc *sc = arg;
5629 	int handled = 0;
5630 	int r1, r2, rv = 0;
5631 	int isperiodic = 0;
5632 
5633 	IWM_LOCK(sc);
5634 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5635 
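	/*
	 * With ICT enabled the device DMAs interrupt causes into a table
	 * in host memory, which lets us avoid a slow register read here.
	 */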
5636 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5637 		uint32_t *ict = sc->ict_dma.vaddr;
5638 		int tmp;
5639 
		tmp = le32toh(ict[sc->ict_cur]);	/* table is little-endian */
5641 		if (!tmp)
5642 			goto out_ena;
5643 
5644 		/*
5645 		 * ok, there was something.  keep plowing until we have all.
5646 		 */
5647 		r1 = r2 = 0;
5648 		while (tmp) {
5649 			r1 |= tmp;
5650 			ict[sc->ict_cur] = 0;
5651 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = le32toh(ict[sc->ict_cur]);
5653 		}
5654 
		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)	/* shouldn't happen; ignore it */
			r1 = 0;

		/*
		 * Hardware bug workaround (see iwlwifi): with interrupt
		 * coalescing the RX bit (bit 15 here, bit 31 after the
		 * expansion below) may be cleared prematurely, but bits
		 * 18 and 19 stay set in that case, so use them to
		 * reconstruct it.
		 */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/*
		 * Expand the 16 ICT status bits into the CSR_INT layout:
		 * the low byte keeps bits 0-7, the high byte moves to
		 * bits 24-31.
		 */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5663 	} else {
5664 		r1 = IWM_READ(sc, IWM_CSR_INT);
5665 		/* "hardware gone" (where, fishing?) */
5666 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5667 			goto out;
5668 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5669 	}
5670 	if (r1 == 0 && r2 == 0) {
5671 		goto out_ena;
5672 	}
5673 
5674 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5675 
5676 	/* Safely ignore these bits for debug checks below */
5677 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5678 
5679 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5680 		int i;
5681 		struct ieee80211com *ic = &sc->sc_ic;
5682 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5683 
5684 #ifdef IWM_DEBUG
5685 		iwm_nic_error(sc);
5686 #endif
5687 		/* Dump driver status (TX and RX rings) while we're here. */
5688 		device_printf(sc->sc_dev, "driver status:\n");
5689 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5690 			struct iwm_tx_ring *ring = &sc->txq[i];
5691 			device_printf(sc->sc_dev,
5692 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5693 			    "queued=%-3d\n",
5694 			    i, ring->qid, ring->cur, ring->queued);
5695 		}
5696 		device_printf(sc->sc_dev,
5697 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5698 		device_printf(sc->sc_dev,
5699 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5700 
5701 		/* Reset our firmware state tracking. */
5702 		sc->sc_firmware_state = 0;
5703 		/* Don't stop the device; just do a VAP restart */
5704 		IWM_UNLOCK(sc);
5705 
5706 		if (vap == NULL) {
5707 			printf("%s: null vap\n", __func__);
5708 			return;
5709 		}
5710 
5711 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5712 		    "restarting\n", __func__, vap->iv_state);
5713 
5714 		ieee80211_restart_all(ic);
5715 		return;
5716 	}
5717 
5718 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5719 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5720 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5721 		iwm_stop(sc);
5722 		rv = 1;
5723 		goto out;
5724 	}
5725 
5726 	/* firmware chunk loaded */
5727 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5728 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5729 		handled |= IWM_CSR_INT_BIT_FH_TX;
5730 		sc->sc_fw_chunk_done = 1;
5731 		wakeup(&sc->sc_fw);
5732 	}
5733 
5734 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5735 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5736 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5737 	}
5738 
5739 	/*
5740 	 * The Linux driver uses periodic interrupts to avoid races.
5741 	 * We cargo-cult like it's going out of fashion.
5742 	 */
5743 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5744 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5745 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5746 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5747 			IWM_WRITE_1(sc,
5748 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5749 		isperiodic = 1;
5750 	}
5751 
5752 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5753 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5754 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5755 
5756 		iwm_notif_intr(sc);
5757 
5758 		/* enable periodic interrupt, see above */
5759 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5760 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5761 			    IWM_CSR_INT_PERIODIC_ENA);
5762 	}
5763 
5764 	if (__predict_false(r1 & ~handled))
5765 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5766 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5767 	rv = 1;
5768 
5769  out_ena:
5770 	iwm_restore_interrupts(sc);
5771  out:
5772 	IWM_UNLOCK(sc);
5773 	return;
5774 }
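
/*
 * For illustration: how the ICT expansion in iwm_intr() above maps a
 * 16-bit ICT status into the CSR_INT bit layout.  E.g. 0x8002 becomes
 * 0x80000002: bit 1 stays in place, while bit 15 lands on bit 31,
 * which is IWM_CSR_INT_BIT_FH_RX.
 */
#if 0
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint32_t ict = 0x8002;
	uint32_t csr = (0xff & ict) | ((0xff00 & ict) << 16);

	/* Prints: ict 0x8002 -> csr 0x80000002 */
	printf("ict 0x%04" PRIx32 " -> csr 0x%08" PRIx32 "\n", ict, csr);
	return (0);
}
#endif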
5775 
5776 /*
5777  * Autoconf glue-sniffing
5778  */
5779 #define	PCI_VENDOR_INTEL		0x8086
5780 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5781 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5782 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5783 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5784 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5785 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5786 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5787 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5788 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5789 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5790 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5791 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5792 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5793 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5794 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
5795 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5796 
5797 static const struct iwm_devices {
5798 	uint16_t		device;
5799 	const struct iwm_cfg	*cfg;
5800 } iwm_devices[] = {
5801 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5802 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5803 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5804 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5805 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5806 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5807 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5808 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5809 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5810 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5811 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5812 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5813 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5814 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5815 	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5816 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5817 };
5818 
5819 static int
5820 iwm_probe(device_t dev)
5821 {
5822 	int i;
5823 
5824 	for (i = 0; i < nitems(iwm_devices); i++) {
5825 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5826 		    pci_get_device(dev) == iwm_devices[i].device) {
5827 			device_set_desc(dev, iwm_devices[i].cfg->name);
5828 			return (BUS_PROBE_DEFAULT);
5829 		}
5830 	}
5831 
5832 	return (ENXIO);
5833 }
5834 
5835 static int
5836 iwm_dev_check(device_t dev)
5837 {
5838 	struct iwm_softc *sc;
5839 	uint16_t devid;
5840 	int i;
5841 
5842 	sc = device_get_softc(dev);
5843 
5844 	devid = pci_get_device(dev);
5845 	for (i = 0; i < nitems(iwm_devices); i++) {
5846 		if (iwm_devices[i].device == devid) {
5847 			sc->cfg = iwm_devices[i].cfg;
5848 			return (0);
5849 		}
5850 	}
5851 	device_printf(dev, "unknown adapter type\n");
5852 	return ENXIO;
5853 }
5854 
5855 /* PCI registers */
5856 #define PCI_CFG_RETRY_TIMEOUT	0x041
5857 
5858 static int
5859 iwm_pci_attach(device_t dev)
5860 {
5861 	struct iwm_softc *sc;
5862 	int count, error, rid;
5863 	uint16_t reg;
5864 
5865 	sc = device_get_softc(dev);
5866 
5867 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5868 	 * PCI Tx retries from interfering with C3 CPU state */
5869 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5870 
5871 	/* Enable bus-mastering and hardware bug workaround. */
5872 	pci_enable_busmaster(dev);
5873 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* Clear the INTx interrupt-status bit (only relevant without MSI). */
5875 	if (reg & PCIM_STATUS_INTxSTATE) {
5876 		reg &= ~PCIM_STATUS_INTxSTATE;
5877 	}
5878 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5879 
5880 	rid = PCIR_BAR(0);
5881 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5882 	    RF_ACTIVE);
5883 	if (sc->sc_mem == NULL) {
5884 		device_printf(sc->sc_dev, "can't map mem space\n");
5885 		return (ENXIO);
5886 	}
5887 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5888 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5889 
5890 	/* Install interrupt handler. */
5891 	count = 1;
5892 	rid = 0;
5893 	if (pci_alloc_msi(dev, &count) == 0)
5894 		rid = 1;
5895 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5896 	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
	if (error != 0 || sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
	}
5907 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5908 
5909 	return (0);
5910 }
5911 
5912 static void
5913 iwm_pci_detach(device_t dev)
5914 {
5915 	struct iwm_softc *sc = device_get_softc(dev);
5916 
5917 	if (sc->sc_irq != NULL) {
5918 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5919 		bus_release_resource(dev, SYS_RES_IRQ,
5920 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5921 		pci_release_msi(dev);
	}
5923 	if (sc->sc_mem != NULL)
5924 		bus_release_resource(dev, SYS_RES_MEMORY,
5925 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5926 }
5927 
5928 static int
5929 iwm_attach(device_t dev)
5930 {
5931 	struct iwm_softc *sc = device_get_softc(dev);
5932 	struct ieee80211com *ic = &sc->sc_ic;
5933 	int error;
5934 	int txq_i, i;
5935 
5936 	sc->sc_dev = dev;
5937 	sc->sc_attached = 1;
5938 	IWM_LOCK_INIT(sc);
5939 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5940 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5941 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5942 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5943 	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
5944 
5945 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5946 	    taskqueue_thread_enqueue, &sc->sc_tq);
5947 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5948 	if (error != 0) {
5949 		device_printf(dev, "can't start taskq thread, error %d\n",
5950 		    error);
5951 		goto fail;
5952 	}
5953 
5954 	error = iwm_dev_check(dev);
5955 	if (error != 0)
5956 		goto fail;
5957 
5958 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5959 	if (sc->sc_notif_wait == NULL) {
5960 		device_printf(dev, "failed to init notification wait struct\n");
5961 		goto fail;
5962 	}
5963 
5964 	sc->sf_state = IWM_SF_UNINIT;
5965 
5966 	/* Init phy db */
5967 	sc->sc_phy_db = iwm_phy_db_init(sc);
5968 	if (!sc->sc_phy_db) {
5969 		device_printf(dev, "Cannot init phy_db\n");
5970 		goto fail;
5971 	}
5972 
5973 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5974 	sc->last_ebs_successful = TRUE;
5975 
5976 	/* PCI attach */
5977 	error = iwm_pci_attach(dev);
5978 	if (error != 0)
5979 		goto fail;
5980 
5981 	sc->sc_wantresp = -1;
5982 
5983 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed: the revision step now also includes bits 0-1 (there is no
	 * more "dash" value).  To keep hw_rev backwards compatible, we store
	 * it in the old format, i.e. the expression below moves the new
	 * two-bit step from bits 0-1 into the old step position at bits 2-3.
	 */
5990 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
5991 		int ret;
5992 		uint32_t hw_step;
5993 
5994 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5995 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5996 
5997 		if (iwm_prepare_card_hw(sc) != 0) {
5998 			device_printf(dev, "could not initialize hardware\n");
5999 			goto fail;
6000 		}
6001 
6002 		/*
6003 		 * In order to recognize C step the driver should read the
6004 		 * chip version id located at the AUX bus MISC address.
6005 		 */
6006 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6007 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6008 		DELAY(2);
6009 
6010 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6011 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6012 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6013 				   25000);
6014 		if (!ret) {
6015 			device_printf(sc->sc_dev,
6016 			    "Failed to wake up the nic\n");
6017 			goto fail;
6018 		}
6019 
6020 		if (iwm_nic_lock(sc)) {
6021 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6022 			hw_step |= IWM_ENABLE_WFPM;
6023 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6024 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6025 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6026 			if (hw_step == 0x3)
6027 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6028 						(IWM_SILICON_C_STEP << 2);
6029 			iwm_nic_unlock(sc);
6030 		} else {
6031 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6032 			goto fail;
6033 		}
6034 	}
6035 
6036 	/* special-case 7265D, it has the same PCI IDs. */
6037 	if (sc->cfg == &iwm7265_cfg &&
6038 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6039 		sc->cfg = &iwm7265d_cfg;
6040 	}
6041 
6042 	/* Allocate DMA memory for firmware transfers. */
6043 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6044 		device_printf(dev, "could not allocate memory for firmware\n");
6045 		goto fail;
6046 	}
6047 
6048 	/* Allocate "Keep Warm" page. */
6049 	if ((error = iwm_alloc_kw(sc)) != 0) {
6050 		device_printf(dev, "could not allocate keep warm page\n");
6051 		goto fail;
6052 	}
6053 
6054 	/* We use ICT interrupts */
6055 	if ((error = iwm_alloc_ict(sc)) != 0) {
6056 		device_printf(dev, "could not allocate ICT table\n");
6057 		goto fail;
6058 	}
6059 
6060 	/* Allocate TX scheduler "rings". */
6061 	if ((error = iwm_alloc_sched(sc)) != 0) {
6062 		device_printf(dev, "could not allocate TX scheduler rings\n");
6063 		goto fail;
6064 	}
6065 
6066 	/* Allocate TX rings */
6067 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6068 		if ((error = iwm_alloc_tx_ring(sc,
6069 		    &sc->txq[txq_i], txq_i)) != 0) {
6070 			device_printf(dev,
6071 			    "could not allocate TX ring %d\n",
6072 			    txq_i);
6073 			goto fail;
6074 		}
6075 	}
6076 
6077 	/* Allocate RX ring. */
6078 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6079 		device_printf(dev, "could not allocate RX ring\n");
6080 		goto fail;
6081 	}
6082 
6083 	/* Clear pending interrupts. */
6084 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6085 
6086 	ic->ic_softc = sc;
6087 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6088 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6089 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6090 
6091 	/* Set device capabilities. */
6092 	ic->ic_caps =
6093 	    IEEE80211_C_STA |
6094 	    IEEE80211_C_WPA |		/* WPA/RSN */
6095 	    IEEE80211_C_WME |
6096 	    IEEE80211_C_PMGT |
6097 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6098 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6099 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6100 	    ;
6101 	/* Advertise full-offload scanning */
6102 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6103 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6104 		sc->sc_phyctxt[i].id = i;
6105 		sc->sc_phyctxt[i].color = 0;
6106 		sc->sc_phyctxt[i].ref = 0;
6107 		sc->sc_phyctxt[i].channel = NULL;
6108 	}
6109 
6110 	/* Default noise floor */
6111 	sc->sc_noise = -96;
6112 
6113 	/* Max RSSI */
6114 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6115 
6116 #ifdef IWM_DEBUG
6117 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6118 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6119 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6120 #endif
6121 
6122 	error = iwm_read_firmware(sc);
6123 	if (error) {
6124 		goto fail;
6125 	} else if (sc->sc_fw.fw_fp == NULL) {
6126 		/*
6127 		 * XXX Add a solution for properly deferring firmware load
6128 		 *     during bootup.
6129 		 */
6130 		goto fail;
6131 	} else {
6132 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6133 		sc->sc_preinit_hook.ich_arg = sc;
6134 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6135 			device_printf(dev,
6136 			    "config_intrhook_establish failed\n");
6137 			goto fail;
6138 		}
6139 	}
6140 
6141 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6142 	    "<-%s\n", __func__);
6143 
6144 	return 0;
6145 
6146 	/* Free allocated memory if something failed during attachment. */
6147 fail:
6148 	iwm_detach_local(sc, 0);
6149 
6150 	return ENXIO;
6151 }
6152 
6153 static int
6154 iwm_is_valid_ether_addr(uint8_t *addr)
6155 {
6156 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6157 
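	/* Bit 0 of the first octet set means a group (multicast) address. */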
6158 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6159 		return (FALSE);
6160 
6161 	return (TRUE);
6162 }
6163 
6164 static int
6165 iwm_wme_update(struct ieee80211com *ic)
6166 {
6167 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
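/* E.g. a logcwmin of 4 gives IWM_EXP2(4) == 2^4 - 1 == 15 slots. */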
6168 	struct iwm_softc *sc = ic->ic_softc;
6169 	struct chanAccParams chp;
6170 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6171 	struct iwm_vap *ivp = IWM_VAP(vap);
6172 	struct iwm_node *in;
6173 	struct wmeParams tmp[WME_NUM_AC];
6174 	int aci, error;
6175 
6176 	if (vap == NULL)
6177 		return (0);
6178 
6179 	ieee80211_wme_ic_getparams(ic, &chp);
6180 
6181 	IEEE80211_LOCK(ic);
6182 	for (aci = 0; aci < WME_NUM_AC; aci++)
6183 		tmp[aci] = chp.cap_wmeParams[aci];
6184 	IEEE80211_UNLOCK(ic);
6185 
6186 	IWM_LOCK(sc);
6187 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6188 		const struct wmeParams *ac = &tmp[aci];
6189 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6190 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6191 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6192 		ivp->queue_params[aci].edca_txop =
6193 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6194 	}
6195 	ivp->have_wme = TRUE;
6196 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6197 		in = IWM_NODE(vap->iv_bss);
6198 		if (in->in_assoc) {
6199 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6200 				device_printf(sc->sc_dev,
6201 				    "%s: failed to update MAC\n", __func__);
6202 			}
6203 		}
6204 	}
6205 	IWM_UNLOCK(sc);
6206 
6207 	return (0);
6208 #undef IWM_EXP2
6209 }
6210 
6211 static void
6212 iwm_preinit(void *arg)
6213 {
6214 	struct iwm_softc *sc = arg;
6215 	device_t dev = sc->sc_dev;
6216 	struct ieee80211com *ic = &sc->sc_ic;
6217 	int error;
6218 
6219 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6220 	    "->%s\n", __func__);
6221 
6222 	IWM_LOCK(sc);
6223 	if ((error = iwm_start_hw(sc)) != 0) {
6224 		device_printf(dev, "could not initialize hardware\n");
6225 		IWM_UNLOCK(sc);
6226 		goto fail;
6227 	}
6228 
6229 	error = iwm_run_init_ucode(sc, 1);
6230 	iwm_stop_device(sc);
6231 	if (error) {
6232 		IWM_UNLOCK(sc);
6233 		goto fail;
6234 	}
6235 	device_printf(dev,
6236 	    "hw rev 0x%x, fw ver %s, address %s\n",
6237 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6238 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6239 
6240 	/* not all hardware can do 5GHz band */
6241 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6242 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6243 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6244 	IWM_UNLOCK(sc);
6245 
6246 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6247 	    ic->ic_channels);
6248 
6249 	/*
6250 	 * At this point we've committed - if we fail to do setup,
6251 	 * we now also have to tear down the net80211 state.
6252 	 */
6253 	ieee80211_ifattach(ic);
6254 	ic->ic_vap_create = iwm_vap_create;
6255 	ic->ic_vap_delete = iwm_vap_delete;
6256 	ic->ic_raw_xmit = iwm_raw_xmit;
6257 	ic->ic_node_alloc = iwm_node_alloc;
6258 	ic->ic_scan_start = iwm_scan_start;
6259 	ic->ic_scan_end = iwm_scan_end;
6260 	ic->ic_update_mcast = iwm_update_mcast;
6261 	ic->ic_getradiocaps = iwm_init_channel_map;
6262 	ic->ic_set_channel = iwm_set_channel;
6263 	ic->ic_scan_curchan = iwm_scan_curchan;
6264 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6265 	ic->ic_wme.wme_update = iwm_wme_update;
6266 	ic->ic_parent = iwm_parent;
6267 	ic->ic_transmit = iwm_transmit;
6268 	iwm_radiotap_attach(sc);
6269 	if (bootverbose)
6270 		ieee80211_announce(ic);
6271 
6272 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6273 	    "<-%s\n", __func__);
6274 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6275 
6276 	return;
6277 fail:
6278 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6279 	iwm_detach_local(sc, 0);
6280 }
6281 
6282 /*
6283  * Attach the interface to 802.11 radiotap.
6284  */
6285 static void
6286 iwm_radiotap_attach(struct iwm_softc *sc)
6287 {
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s end\n", __func__);
6299 }
6300 
6301 static struct ieee80211vap *
6302 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6303     enum ieee80211_opmode opmode, int flags,
6304     const uint8_t bssid[IEEE80211_ADDR_LEN],
6305     const uint8_t mac[IEEE80211_ADDR_LEN])
6306 {
6307 	struct iwm_vap *ivp;
6308 	struct ieee80211vap *vap;
6309 
6310 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6311 		return NULL;
6312 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6313 	vap = &ivp->iv_vap;
6314 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6315 	vap->iv_bmissthreshold = 10;            /* override default */
6316 	/* Override with driver methods. */
6317 	ivp->iv_newstate = vap->iv_newstate;
6318 	vap->iv_newstate = iwm_newstate;
6319 
6320 	ivp->id = IWM_DEFAULT_MACID;
6321 	ivp->color = IWM_DEFAULT_COLOR;
6322 
6323 	ivp->have_wme = FALSE;
6324 	ivp->ps_disabled = FALSE;
6325 
6326 	ieee80211_ratectl_init(vap);
6327 	/* Complete setup. */
6328 	ieee80211_vap_attach(vap, ieee80211_media_change,
6329 	    ieee80211_media_status, mac);
6330 	ic->ic_opmode = opmode;
6331 
6332 	return vap;
6333 }
6334 
6335 static void
6336 iwm_vap_delete(struct ieee80211vap *vap)
6337 {
6338 	struct iwm_vap *ivp = IWM_VAP(vap);
6339 
6340 	ieee80211_ratectl_deinit(vap);
6341 	ieee80211_vap_detach(vap);
6342 	free(ivp, M_80211_VAP);
6343 }
6344 
6345 static void
6346 iwm_xmit_queue_drain(struct iwm_softc *sc)
6347 {
6348 	struct mbuf *m;
6349 	struct ieee80211_node *ni;
6350 
6351 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6352 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6353 		ieee80211_free_node(ni);
6354 		m_freem(m);
6355 	}
6356 }
6357 
6358 static void
6359 iwm_scan_start(struct ieee80211com *ic)
6360 {
6361 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6362 	struct iwm_softc *sc = ic->ic_softc;
6363 	int error;
6364 
6365 	IWM_LOCK(sc);
6366 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6367 		/* This should not be possible */
6368 		device_printf(sc->sc_dev,
6369 		    "%s: Previous scan not completed yet\n", __func__);
6370 	}
6371 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6372 		error = iwm_umac_scan(sc);
6373 	else
6374 		error = iwm_lmac_scan(sc);
6375 	if (error != 0) {
6376 		device_printf(sc->sc_dev, "could not initiate scan\n");
6377 		IWM_UNLOCK(sc);
6378 		ieee80211_cancel_scan(vap);
6379 	} else {
6380 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6381 		iwm_led_blink_start(sc);
6382 		IWM_UNLOCK(sc);
6383 	}
6384 }
6385 
6386 static void
6387 iwm_scan_end(struct ieee80211com *ic)
6388 {
6389 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6390 	struct iwm_softc *sc = ic->ic_softc;
6391 
6392 	IWM_LOCK(sc);
6393 	iwm_led_blink_stop(sc);
6394 	if (vap->iv_state == IEEE80211_S_RUN)
6395 		iwm_led_enable(sc);
6396 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6397 		/*
6398 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6399 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6400 		 * taskqueue.
6401 		 */
6402 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6403 		iwm_scan_stop_wait(sc);
6404 	}
6405 	IWM_UNLOCK(sc);
6406 
6407 	/*
6408 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6409 	 * This is to make sure that it won't call ieee80211_scan_done
6410 	 * when we have already started the next scan.
6411 	 */
6412 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6413 }
6414 
6415 static void
6416 iwm_update_mcast(struct ieee80211com *ic)
6417 {
6418 }
6419 
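/*
 * With full-offload scanning (IEEE80211_FEXT_SCAN_OFFLOAD) the firmware
 * handles channel switching and dwell times itself, so the callbacks
 * below can be left empty.
 */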
6420 static void
6421 iwm_set_channel(struct ieee80211com *ic)
6422 {
6423 }
6424 
6425 static void
6426 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6427 {
6428 }
6429 
6430 static void
6431 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6432 {
6433 }
6434 
6435 void
6436 iwm_init_task(void *arg1)
6437 {
6438 	struct iwm_softc *sc = arg1;
6439 
6440 	IWM_LOCK(sc);
6441 	while (sc->sc_flags & IWM_FLAG_BUSY)
6442 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6443 	sc->sc_flags |= IWM_FLAG_BUSY;
6444 	iwm_stop(sc);
6445 	if (sc->sc_ic.ic_nrunning > 0)
6446 		iwm_init(sc);
6447 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6448 	wakeup(&sc->sc_flags);
6449 	IWM_UNLOCK(sc);
6450 }
6451 
6452 static int
6453 iwm_resume(device_t dev)
6454 {
6455 	struct iwm_softc *sc = device_get_softc(dev);
6456 	int do_reinit = 0;
6457 
6458 	/*
6459 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6460 	 * PCI Tx retries from interfering with C3 CPU state.
6461 	 */
6462 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6463 
6464 	if (!sc->sc_attached)
6465 		return 0;
6466 
	iwm_init_task(sc);
6468 
6469 	IWM_LOCK(sc);
6470 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6471 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6472 		do_reinit = 1;
6473 	}
6474 	IWM_UNLOCK(sc);
6475 
6476 	if (do_reinit)
6477 		ieee80211_resume_all(&sc->sc_ic);
6478 
6479 	return 0;
6480 }
6481 
6482 static int
6483 iwm_suspend(device_t dev)
6484 {
6485 	int do_stop = 0;
6486 	struct iwm_softc *sc = device_get_softc(dev);
6487 
	do_stop = (sc->sc_ic.ic_nrunning > 0);
6489 
6490 	if (!sc->sc_attached)
6491 		return (0);
6492 
6493 	ieee80211_suspend_all(&sc->sc_ic);
6494 
6495 	if (do_stop) {
6496 		IWM_LOCK(sc);
6497 		iwm_stop(sc);
6498 		sc->sc_flags |= IWM_FLAG_SCANNING;
6499 		IWM_UNLOCK(sc);
6500 	}
6501 
6502 	return (0);
6503 }
6504 
6505 static int
6506 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6507 {
6508 	struct iwm_fw_info *fw = &sc->sc_fw;
6509 	device_t dev = sc->sc_dev;
6510 	int i;
6511 
6512 	if (!sc->sc_attached)
6513 		return 0;
6514 	sc->sc_attached = 0;
6515 	if (do_net80211) {
6516 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6517 	}
6518 	iwm_stop_device(sc);
6519 	taskqueue_drain_all(sc->sc_tq);
6520 	taskqueue_free(sc->sc_tq);
6521 	if (do_net80211) {
6522 		IWM_LOCK(sc);
6523 		iwm_xmit_queue_drain(sc);
6524 		IWM_UNLOCK(sc);
6525 		ieee80211_ifdetach(&sc->sc_ic);
6526 	}
6527 	callout_drain(&sc->sc_led_blink_to);
6528 	callout_drain(&sc->sc_watchdog_to);
6529 
6530 	iwm_phy_db_free(sc->sc_phy_db);
6531 	sc->sc_phy_db = NULL;
6532 
6533 	iwm_free_nvm_data(sc->nvm_data);
6534 
6535 	/* Free descriptor rings */
6536 	iwm_free_rx_ring(sc, &sc->rxq);
6537 	for (i = 0; i < nitems(sc->txq); i++)
6538 		iwm_free_tx_ring(sc, &sc->txq[i]);
6539 
6540 	/* Free firmware */
6541 	if (fw->fw_fp != NULL)
6542 		iwm_fw_info_free(fw);
6543 
6544 	/* Free scheduler */
6545 	iwm_dma_contig_free(&sc->sched_dma);
6546 	iwm_dma_contig_free(&sc->ict_dma);
6547 	iwm_dma_contig_free(&sc->kw_dma);
6548 	iwm_dma_contig_free(&sc->fw_dma);
6549 
6550 	iwm_free_fw_paging(sc);
6551 
6552 	/* Finished with the hardware - detach things */
6553 	iwm_pci_detach(dev);
6554 
6555 	if (sc->sc_notif_wait != NULL) {
6556 		iwm_notification_wait_free(sc->sc_notif_wait);
6557 		sc->sc_notif_wait = NULL;
6558 	}
6559 
6560 	IWM_LOCK_DESTROY(sc);
6561 
6562 	return (0);
6563 }
6564 
6565 static int
6566 iwm_detach(device_t dev)
6567 {
6568 	struct iwm_softc *sc = device_get_softc(dev);
6569 
6570 	return (iwm_detach_local(sc, 1));
6571 }
6572 
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof(struct iwm_softc)
};
6589 
6590 static devclass_t iwm_devclass;
6591 
6592 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
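/*
 * PNP match table: "U16:device" matches the 16-bit 'device' member of
 * struct iwm_devices, "P:#" skips the cfg pointer, and "T:vendor=0x8086"
 * limits matching to Intel's PCI vendor ID.
 */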
6593 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6594     iwm_devices, nitems(iwm_devices));
6595 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6596 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6597 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6598