xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 57e129b18cfc348b6c91abe2f9ab7df05e72271e)
1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110 
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127 
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131 
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134 
135 #include <net/bpf.h>
136 
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143 
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148 
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153 
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sf.h>
168 #include <dev/iwm/if_iwm_sta.h>
169 
170 #include <dev/iwm/if_iwm_pcie_trans.h>
171 #include <dev/iwm/if_iwm_led.h>
172 #include <dev/iwm/if_iwm_fw.h>
173 
174 /* From DragonflyBSD */
175 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176 
177 const uint8_t iwm_nvm_channels[] = {
178 	/* 2.4 GHz */
179 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
180 	/* 5 GHz */
181 	36, 40, 44, 48, 52, 56, 60, 64,
182 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
183 	149, 153, 157, 161, 165
184 };
185 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
186     "IWM_NUM_CHANNELS is too small");
187 
188 const uint8_t iwm_nvm_channels_8000[] = {
189 	/* 2.4 GHz */
190 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
191 	/* 5 GHz */
192 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
193 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
194 	149, 153, 157, 161, 165, 169, 173, 177, 181
195 };
196 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
197     "IWM_NUM_CHANNELS_8000 is too small");
198 
199 #define IWM_NUM_2GHZ_CHANNELS	14
200 #define IWM_N_HW_ADDR_MASK	0xF
201 
202 /*
203  * XXX For now, there's simply a fixed set of rate table entries
204  * that are populated.
205  */
206 const struct iwm_rate {
207 	uint8_t rate;
208 	uint8_t plcp;
209 } iwm_rates[] = {
210 	{   2,	IWM_RATE_1M_PLCP  },
211 	{   4,	IWM_RATE_2M_PLCP  },
212 	{  11,	IWM_RATE_5M_PLCP  },
213 	{  22,	IWM_RATE_11M_PLCP },
214 	{  12,	IWM_RATE_6M_PLCP  },
215 	{  18,	IWM_RATE_9M_PLCP  },
216 	{  24,	IWM_RATE_12M_PLCP },
217 	{  36,	IWM_RATE_18M_PLCP },
218 	{  48,	IWM_RATE_24M_PLCP },
219 	{  72,	IWM_RATE_36M_PLCP },
220 	{  96,	IWM_RATE_48M_PLCP },
221 	{ 108,	IWM_RATE_54M_PLCP },
222 };
223 #define IWM_RIDX_CCK	0
224 #define IWM_RIDX_OFDM	4
225 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
226 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
227 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228 
229 struct iwm_nvm_section {
230 	uint16_t length;
231 	uint8_t *data;
232 };
233 
234 #define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
235 #define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
236 
237 struct iwm_mvm_alive_data {
238 	int valid;
239 	uint32_t scd_base_addr;
240 };
241 
242 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243 static int	iwm_firmware_store_section(struct iwm_softc *,
244                                            enum iwm_ucode_type,
245                                            const uint8_t *, size_t);
246 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
247 static void	iwm_fw_info_free(struct iwm_fw_info *);
248 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
249 static int	iwm_alloc_fwmem(struct iwm_softc *);
250 static int	iwm_alloc_sched(struct iwm_softc *);
251 static int	iwm_alloc_kw(struct iwm_softc *);
252 static int	iwm_alloc_ict(struct iwm_softc *);
253 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void	iwm_enable_interrupts(struct iwm_softc *);
261 static void	iwm_restore_interrupts(struct iwm_softc *);
262 static void	iwm_disable_interrupts(struct iwm_softc *);
263 static void	iwm_ict_reset(struct iwm_softc *);
264 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void	iwm_stop_device(struct iwm_softc *);
266 static void	iwm_mvm_nic_config(struct iwm_softc *);
267 static int	iwm_nic_rx_init(struct iwm_softc *);
268 static int	iwm_nic_tx_init(struct iwm_softc *);
269 static int	iwm_nic_init(struct iwm_softc *);
270 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274 				     uint16_t *, uint32_t);
275 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
276 static void	iwm_add_channel_band(struct iwm_softc *,
277 		    struct ieee80211_channel[], int, int *, int, size_t,
278 		    const uint8_t[]);
279 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
280 		    struct ieee80211_channel[]);
281 static struct iwm_nvm_data *
282 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283 			   const uint16_t *, const uint16_t *,
284 			   const uint16_t *, const uint16_t *,
285 			   const uint16_t *);
286 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
287 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
288 					       struct iwm_nvm_data *,
289 					       const uint16_t *,
290 					       const uint16_t *);
291 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292 			    const uint16_t *);
293 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295 				  const uint16_t *);
296 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
297 				   const uint16_t *);
298 static void	iwm_set_radio_cfg(const struct iwm_softc *,
299 				  struct iwm_nvm_data *, uint32_t);
300 static struct iwm_nvm_data *
301 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302 static int	iwm_nvm_init(struct iwm_softc *);
303 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304 				      const struct iwm_fw_desc *);
305 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306 					     bus_addr_t, uint32_t);
307 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308 						const struct iwm_fw_sects *,
309 						int, int *);
310 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
311 					   const struct iwm_fw_sects *,
312 					   int, int *);
313 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314 					       const struct iwm_fw_sects *);
315 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
316 					  const struct iwm_fw_sects *);
317 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
318 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
320 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
321                                               enum iwm_ucode_type);
322 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
323 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
324 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
325 					    struct iwm_rx_phy_info *);
326 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
327                                       struct iwm_rx_packet *);
328 static int	iwm_get_noise(struct iwm_softc *sc,
329 		    const struct iwm_mvm_statistics_rx_non_phy *);
330 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
331 				    uint32_t, boolean_t);
332 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                          struct iwm_rx_packet *,
334 				         struct iwm_node *);
335 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
336 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
337 #if 0
338 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
339                                  uint16_t);
340 #endif
341 static const struct iwm_rate *
342 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
343 			struct mbuf *, struct iwm_tx_cmd *);
344 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
345                        struct ieee80211_node *, int);
346 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
347 			     const struct ieee80211_bpf_params *);
348 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
349 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
350 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
351 static struct ieee80211_node *
352 		iwm_node_alloc(struct ieee80211vap *,
353 		               const uint8_t[IEEE80211_ADDR_LEN]);
354 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
355 static int	iwm_media_change(struct ifnet *);
356 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
357 static void	iwm_endscan_cb(void *, int);
358 static int	iwm_send_bt_init_conf(struct iwm_softc *);
359 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
360 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
361 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
362 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
363 static int	iwm_init_hw(struct iwm_softc *);
364 static void	iwm_init(struct iwm_softc *);
365 static void	iwm_start(struct iwm_softc *);
366 static void	iwm_stop(struct iwm_softc *);
367 static void	iwm_watchdog(void *);
368 static void	iwm_parent(struct ieee80211com *);
369 #ifdef IWM_DEBUG
370 static const char *
371 		iwm_desc_lookup(uint32_t);
372 static void	iwm_nic_error(struct iwm_softc *);
373 static void	iwm_nic_umac_error(struct iwm_softc *);
374 #endif
375 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
376 static void	iwm_notif_intr(struct iwm_softc *);
377 static void	iwm_intr(void *);
378 static int	iwm_attach(device_t);
379 static int	iwm_is_valid_ether_addr(uint8_t *);
380 static void	iwm_preinit(void *);
381 static int	iwm_detach_local(struct iwm_softc *sc, int);
382 static void	iwm_init_task(void *);
383 static void	iwm_radiotap_attach(struct iwm_softc *);
384 static struct ieee80211vap *
385 		iwm_vap_create(struct ieee80211com *,
386 		               const char [IFNAMSIZ], int,
387 		               enum ieee80211_opmode, int,
388 		               const uint8_t [IEEE80211_ADDR_LEN],
389 		               const uint8_t [IEEE80211_ADDR_LEN]);
390 static void	iwm_vap_delete(struct ieee80211vap *);
391 static void	iwm_scan_start(struct ieee80211com *);
392 static void	iwm_scan_end(struct ieee80211com *);
393 static void	iwm_update_mcast(struct ieee80211com *);
394 static void	iwm_set_channel(struct ieee80211com *);
395 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
396 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
397 static int	iwm_detach(device_t);
398 
399 static int	iwm_lar_disable = 0;
400 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
401 
402 /*
403  * Firmware parser.
404  */
405 
406 static int
407 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
408 {
409 	const struct iwm_fw_cscheme_list *l = (const void *)data;
410 
411 	if (dlen < sizeof(*l) ||
412 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
413 		return EINVAL;
414 
415 	/* we don't actually store anything for now, always use s/w crypto */
416 
417 	return 0;
418 }
419 
420 static int
421 iwm_firmware_store_section(struct iwm_softc *sc,
422     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
423 {
424 	struct iwm_fw_sects *fws;
425 	struct iwm_fw_desc *fwone;
426 
427 	if (type >= IWM_UCODE_TYPE_MAX)
428 		return EINVAL;
429 	if (dlen < sizeof(uint32_t))
430 		return EINVAL;
431 
432 	fws = &sc->sc_fw.fw_sects[type];
433 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
434 		return EINVAL;
435 
436 	fwone = &fws->fw_sect[fws->fw_count];
437 
438 	/* first 32bit are device load offset */
439 	memcpy(&fwone->offset, data, sizeof(uint32_t));
440 
441 	/* rest is data */
442 	fwone->data = data + sizeof(uint32_t);
443 	fwone->len = dlen - sizeof(uint32_t);
444 
445 	fws->fw_count++;
446 
447 	return 0;
448 }
449 
450 #define IWM_DEFAULT_SCAN_CHANNELS 40
451 
452 /* iwlwifi: iwl-drv.c */
453 struct iwm_tlv_calib_data {
454 	uint32_t ucode_type;
455 	struct iwm_tlv_calib_ctrl calib;
456 } __packed;
457 
458 static int
459 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
460 {
461 	const struct iwm_tlv_calib_data *def_calib = data;
462 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
463 
464 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
465 		device_printf(sc->sc_dev,
466 		    "Wrong ucode_type %u for default "
467 		    "calibration.\n", ucode_type);
468 		return EINVAL;
469 	}
470 
471 	sc->sc_default_calib[ucode_type].flow_trigger =
472 	    def_calib->calib.flow_trigger;
473 	sc->sc_default_calib[ucode_type].event_trigger =
474 	    def_calib->calib.event_trigger;
475 
476 	return 0;
477 }
478 
479 static int
480 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
481 			struct iwm_ucode_capabilities *capa)
482 {
483 	const struct iwm_ucode_api *ucode_api = (const void *)data;
484 	uint32_t api_index = le32toh(ucode_api->api_index);
485 	uint32_t api_flags = le32toh(ucode_api->api_flags);
486 	int i;
487 
488 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
489 		device_printf(sc->sc_dev,
490 		    "api flags index %d larger than supported by driver\n",
491 		    api_index);
492 		/* don't return an error so we can load FW that has more bits */
493 		return 0;
494 	}
495 
496 	for (i = 0; i < 32; i++) {
497 		if (api_flags & (1U << i))
498 			setbit(capa->enabled_api, i + 32 * api_index);
499 	}
500 
501 	return 0;
502 }
503 
504 static int
505 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
506 			   struct iwm_ucode_capabilities *capa)
507 {
508 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
509 	uint32_t api_index = le32toh(ucode_capa->api_index);
510 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
511 	int i;
512 
513 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
514 		device_printf(sc->sc_dev,
515 		    "capa flags index %d larger than supported by driver\n",
516 		    api_index);
517 		/* don't return an error so we can load FW that has more bits */
518 		return 0;
519 	}
520 
521 	for (i = 0; i < 32; i++) {
522 		if (api_flags & (1U << i))
523 			setbit(capa->enabled_capa, i + 32 * api_index);
524 	}
525 
526 	return 0;
527 }
528 
529 static void
530 iwm_fw_info_free(struct iwm_fw_info *fw)
531 {
532 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
533 	fw->fw_fp = NULL;
534 	/* don't touch fw->fw_status */
535 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
536 }
537 
538 static int
539 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
540 {
541 	struct iwm_fw_info *fw = &sc->sc_fw;
542 	const struct iwm_tlv_ucode_header *uhdr;
543 	const struct iwm_ucode_tlv *tlv;
544 	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
545 	enum iwm_ucode_tlv_type tlv_type;
546 	const struct firmware *fwp;
547 	const uint8_t *data;
548 	uint32_t tlv_len;
549 	uint32_t usniffer_img;
550 	const uint8_t *tlv_data;
551 	uint32_t paging_mem_size;
552 	int num_of_cpus;
553 	int error = 0;
554 	size_t len;
555 
556 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
557 	    ucode_type != IWM_UCODE_INIT)
558 		return 0;
559 
560 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
561 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
562 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
563 
564 	if (fw->fw_fp != NULL)
565 		iwm_fw_info_free(fw);
566 
567 	/*
568 	 * Load firmware into driver memory.
569 	 * fw_fp will be set.
570 	 */
571 	IWM_UNLOCK(sc);
572 	fwp = firmware_get(sc->cfg->fw_name);
573 	IWM_LOCK(sc);
574 	if (fwp == NULL) {
575 		device_printf(sc->sc_dev,
576 		    "could not read firmware %s (error %d)\n",
577 		    sc->cfg->fw_name, error);
578 		goto out;
579 	}
580 	fw->fw_fp = fwp;
581 
582 	/* (Re-)Initialize default values. */
583 	capa->flags = 0;
584 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
585 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
586 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
587 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
588 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
589 
590 	/*
591 	 * Parse firmware contents
592 	 */
593 
594 	uhdr = (const void *)fw->fw_fp->data;
595 	if (*(const uint32_t *)fw->fw_fp->data != 0
596 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
597 		device_printf(sc->sc_dev, "invalid firmware %s\n",
598 		    sc->cfg->fw_name);
599 		error = EINVAL;
600 		goto out;
601 	}
602 
603 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
604 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
605 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
606 	    IWM_UCODE_API(le32toh(uhdr->ver)));
607 	data = uhdr->data;
608 	len = fw->fw_fp->datasize - sizeof(*uhdr);
609 
610 	while (len >= sizeof(*tlv)) {
611 		len -= sizeof(*tlv);
612 		tlv = (const void *)data;
613 
614 		tlv_len = le32toh(tlv->length);
615 		tlv_type = le32toh(tlv->type);
616 		tlv_data = tlv->data;
617 
618 		if (len < tlv_len) {
619 			device_printf(sc->sc_dev,
620 			    "firmware too short: %zu bytes\n",
621 			    len);
622 			error = EINVAL;
623 			goto parse_out;
624 		}
625 		len -= roundup2(tlv_len, 4);
626 		data += sizeof(tlv) + roundup2(tlv_len, 4);
627 
628 		switch ((int)tlv_type) {
629 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
630 			if (tlv_len != sizeof(uint32_t)) {
631 				device_printf(sc->sc_dev,
632 				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
633 				    __func__,
634 				    (int) tlv_len);
635 				error = EINVAL;
636 				goto parse_out;
637 			}
638 			capa->max_probe_length =
639 			    le32_to_cpup((const uint32_t *)tlv_data);
640 			/* limit it to something sensible */
641 			if (capa->max_probe_length >
642 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
643 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
644 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
645 				    "ridiculous\n", __func__);
646 				error = EINVAL;
647 				goto parse_out;
648 			}
649 			break;
650 		case IWM_UCODE_TLV_PAN:
651 			if (tlv_len) {
652 				device_printf(sc->sc_dev,
653 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
654 				    __func__,
655 				    (int) tlv_len);
656 				error = EINVAL;
657 				goto parse_out;
658 			}
659 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
660 			break;
661 		case IWM_UCODE_TLV_FLAGS:
662 			if (tlv_len < sizeof(uint32_t)) {
663 				device_printf(sc->sc_dev,
664 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
665 				    __func__,
666 				    (int) tlv_len);
667 				error = EINVAL;
668 				goto parse_out;
669 			}
670 			if (tlv_len % sizeof(uint32_t)) {
671 				device_printf(sc->sc_dev,
672 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
673 				    __func__,
674 				    (int) tlv_len);
675 				error = EINVAL;
676 				goto parse_out;
677 			}
678 			/*
679 			 * Apparently there can be many flags, but Linux driver
680 			 * parses only the first one, and so do we.
681 			 *
682 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
683 			 * Intentional or a bug?  Observations from
684 			 * current firmware file:
685 			 *  1) TLV_PAN is parsed first
686 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
687 			 * ==> this resets TLV_PAN to itself... hnnnk
688 			 */
689 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
690 			break;
691 		case IWM_UCODE_TLV_CSCHEME:
692 			if ((error = iwm_store_cscheme(sc,
693 			    tlv_data, tlv_len)) != 0) {
694 				device_printf(sc->sc_dev,
695 				    "%s: iwm_store_cscheme(): returned %d\n",
696 				    __func__,
697 				    error);
698 				goto parse_out;
699 			}
700 			break;
701 		case IWM_UCODE_TLV_NUM_OF_CPU:
702 			if (tlv_len != sizeof(uint32_t)) {
703 				device_printf(sc->sc_dev,
704 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
705 				    __func__,
706 				    (int) tlv_len);
707 				error = EINVAL;
708 				goto parse_out;
709 			}
710 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
711 			if (num_of_cpus == 2) {
712 				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
713 					TRUE;
714 				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
715 					TRUE;
716 				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
717 					TRUE;
718 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
719 				device_printf(sc->sc_dev,
720 				    "%s: Driver supports only 1 or 2 CPUs\n",
721 				    __func__);
722 				error = EINVAL;
723 				goto parse_out;
724 			}
725 			break;
726 		case IWM_UCODE_TLV_SEC_RT:
727 			if ((error = iwm_firmware_store_section(sc,
728 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
729 				device_printf(sc->sc_dev,
730 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
731 				    __func__,
732 				    error);
733 				goto parse_out;
734 			}
735 			break;
736 		case IWM_UCODE_TLV_SEC_INIT:
737 			if ((error = iwm_firmware_store_section(sc,
738 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
739 				device_printf(sc->sc_dev,
740 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
741 				    __func__,
742 				    error);
743 				goto parse_out;
744 			}
745 			break;
746 		case IWM_UCODE_TLV_SEC_WOWLAN:
747 			if ((error = iwm_firmware_store_section(sc,
748 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
749 				device_printf(sc->sc_dev,
750 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
751 				    __func__,
752 				    error);
753 				goto parse_out;
754 			}
755 			break;
756 		case IWM_UCODE_TLV_DEF_CALIB:
757 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
758 				device_printf(sc->sc_dev,
759 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
760 				    __func__,
761 				    (int) tlv_len,
762 				    (int) sizeof(struct iwm_tlv_calib_data));
763 				error = EINVAL;
764 				goto parse_out;
765 			}
766 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
767 				device_printf(sc->sc_dev,
768 				    "%s: iwm_set_default_calib() failed: %d\n",
769 				    __func__,
770 				    error);
771 				goto parse_out;
772 			}
773 			break;
774 		case IWM_UCODE_TLV_PHY_SKU:
775 			if (tlv_len != sizeof(uint32_t)) {
776 				error = EINVAL;
777 				device_printf(sc->sc_dev,
778 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
779 				    __func__,
780 				    (int) tlv_len);
781 				goto parse_out;
782 			}
783 			sc->sc_fw.phy_config =
784 			    le32_to_cpup((const uint32_t *)tlv_data);
785 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
786 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
787 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
788 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
789 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
790 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
791 			break;
792 
793 		case IWM_UCODE_TLV_API_CHANGES_SET: {
794 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
795 				error = EINVAL;
796 				goto parse_out;
797 			}
798 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
799 				error = EINVAL;
800 				goto parse_out;
801 			}
802 			break;
803 		}
804 
805 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
806 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
807 				error = EINVAL;
808 				goto parse_out;
809 			}
810 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
811 				error = EINVAL;
812 				goto parse_out;
813 			}
814 			break;
815 		}
816 
817 		case 48: /* undocumented TLV */
818 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
819 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
820 			/* ignore, not used by current driver */
821 			break;
822 
823 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
824 			if ((error = iwm_firmware_store_section(sc,
825 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
826 			    tlv_len)) != 0)
827 				goto parse_out;
828 			break;
829 
830 		case IWM_UCODE_TLV_PAGING:
831 			if (tlv_len != sizeof(uint32_t)) {
832 				error = EINVAL;
833 				goto parse_out;
834 			}
835 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
836 
837 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
838 			    "%s: Paging: paging enabled (size = %u bytes)\n",
839 			    __func__, paging_mem_size);
840 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
841 				device_printf(sc->sc_dev,
842 					"%s: Paging: driver supports up to %u bytes for paging image\n",
843 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
844 				error = EINVAL;
845 				goto out;
846 			}
847 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
848 				device_printf(sc->sc_dev,
849 				    "%s: Paging: image isn't multiple %u\n",
850 				    __func__, IWM_FW_PAGING_SIZE);
851 				error = EINVAL;
852 				goto out;
853 			}
854 
855 			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
856 			    paging_mem_size;
857 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
858 			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
859 			    paging_mem_size;
860 			break;
861 
862 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
863 			if (tlv_len != sizeof(uint32_t)) {
864 				error = EINVAL;
865 				goto parse_out;
866 			}
867 			capa->n_scan_channels =
868 			    le32_to_cpup((const uint32_t *)tlv_data);
869 			break;
870 
871 		case IWM_UCODE_TLV_FW_VERSION:
872 			if (tlv_len != sizeof(uint32_t) * 3) {
873 				error = EINVAL;
874 				goto parse_out;
875 			}
876 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
877 			    "%d.%d.%d",
878 			    le32toh(((const uint32_t *)tlv_data)[0]),
879 			    le32toh(((const uint32_t *)tlv_data)[1]),
880 			    le32toh(((const uint32_t *)tlv_data)[2]));
881 			break;
882 
883 		case IWM_UCODE_TLV_FW_MEM_SEG:
884 			break;
885 
886 		default:
887 			device_printf(sc->sc_dev,
888 			    "%s: unknown firmware section %d, abort\n",
889 			    __func__, tlv_type);
890 			error = EINVAL;
891 			goto parse_out;
892 		}
893 	}
894 
895 	KASSERT(error == 0, ("unhandled error"));
896 
897  parse_out:
898 	if (error) {
899 		device_printf(sc->sc_dev, "firmware parse error %d, "
900 		    "section type %d\n", error, tlv_type);
901 	}
902 
903  out:
904 	if (error) {
905 		fw->fw_status = IWM_FW_STATUS_NONE;
906 		if (fw->fw_fp != NULL)
907 			iwm_fw_info_free(fw);
908 	} else
909 		fw->fw_status = IWM_FW_STATUS_DONE;
910 	wakeup(&sc->sc_fw);
911 
912 	return error;
913 }
914 
915 /*
916  * DMA resource routines
917  */
918 
919 /* fwmem is used to load firmware onto the card */
920 static int
921 iwm_alloc_fwmem(struct iwm_softc *sc)
922 {
923 	/* Must be aligned on a 16-byte boundary. */
924 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
925 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
926 }
927 
928 /* tx scheduler rings.  not used? */
929 static int
930 iwm_alloc_sched(struct iwm_softc *sc)
931 {
932 	/* TX scheduler rings must be aligned on a 1KB boundary. */
933 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
934 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
935 }
936 
937 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
938 static int
939 iwm_alloc_kw(struct iwm_softc *sc)
940 {
941 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
942 }
943 
944 /* interrupt cause table */
945 static int
946 iwm_alloc_ict(struct iwm_softc *sc)
947 {
948 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
949 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
950 }
951 
952 static int
953 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
954 {
955 	bus_size_t size;
956 	int i, error;
957 
958 	ring->cur = 0;
959 
960 	/* Allocate RX descriptors (256-byte aligned). */
961 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
962 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
963 	if (error != 0) {
964 		device_printf(sc->sc_dev,
965 		    "could not allocate RX ring DMA memory\n");
966 		goto fail;
967 	}
968 	ring->desc = ring->desc_dma.vaddr;
969 
970 	/* Allocate RX status area (16-byte aligned). */
971 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
972 	    sizeof(*ring->stat), 16);
973 	if (error != 0) {
974 		device_printf(sc->sc_dev,
975 		    "could not allocate RX status DMA memory\n");
976 		goto fail;
977 	}
978 	ring->stat = ring->stat_dma.vaddr;
979 
980         /* Create RX buffer DMA tag. */
981         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
982             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
983             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
984         if (error != 0) {
985                 device_printf(sc->sc_dev,
986                     "%s: could not create RX buf DMA tag, error %d\n",
987                     __func__, error);
988                 goto fail;
989         }
990 
991 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
992 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
993 	if (error != 0) {
994 		device_printf(sc->sc_dev,
995 		    "%s: could not create RX buf DMA map, error %d\n",
996 		    __func__, error);
997 		goto fail;
998 	}
999 	/*
1000 	 * Allocate and map RX buffers.
1001 	 */
1002 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1003 		struct iwm_rx_data *data = &ring->data[i];
1004 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1005 		if (error != 0) {
1006 			device_printf(sc->sc_dev,
1007 			    "%s: could not create RX buf DMA map, error %d\n",
1008 			    __func__, error);
1009 			goto fail;
1010 		}
1011 		data->m = NULL;
1012 
1013 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1014 			goto fail;
1015 		}
1016 	}
1017 	return 0;
1018 
1019 fail:	iwm_free_rx_ring(sc, ring);
1020 	return error;
1021 }
1022 
1023 static void
1024 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1025 {
1026 	/* Reset the ring state */
1027 	ring->cur = 0;
1028 
1029 	/*
1030 	 * The hw rx ring index in shared memory must also be cleared,
1031 	 * otherwise the discrepancy can cause reprocessing chaos.
1032 	 */
1033 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1034 }
1035 
1036 static void
1037 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1038 {
1039 	int i;
1040 
1041 	iwm_dma_contig_free(&ring->desc_dma);
1042 	iwm_dma_contig_free(&ring->stat_dma);
1043 
1044 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1045 		struct iwm_rx_data *data = &ring->data[i];
1046 
1047 		if (data->m != NULL) {
1048 			bus_dmamap_sync(ring->data_dmat, data->map,
1049 			    BUS_DMASYNC_POSTREAD);
1050 			bus_dmamap_unload(ring->data_dmat, data->map);
1051 			m_freem(data->m);
1052 			data->m = NULL;
1053 		}
1054 		if (data->map != NULL) {
1055 			bus_dmamap_destroy(ring->data_dmat, data->map);
1056 			data->map = NULL;
1057 		}
1058 	}
1059 	if (ring->spare_map != NULL) {
1060 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1061 		ring->spare_map = NULL;
1062 	}
1063 	if (ring->data_dmat != NULL) {
1064 		bus_dma_tag_destroy(ring->data_dmat);
1065 		ring->data_dmat = NULL;
1066 	}
1067 }
1068 
1069 static int
1070 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1071 {
1072 	bus_addr_t paddr;
1073 	bus_size_t size;
1074 	size_t maxsize;
1075 	int nsegments;
1076 	int i, error;
1077 
1078 	ring->qid = qid;
1079 	ring->queued = 0;
1080 	ring->cur = 0;
1081 
1082 	/* Allocate TX descriptors (256-byte aligned). */
1083 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1084 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1085 	if (error != 0) {
1086 		device_printf(sc->sc_dev,
1087 		    "could not allocate TX ring DMA memory\n");
1088 		goto fail;
1089 	}
1090 	ring->desc = ring->desc_dma.vaddr;
1091 
1092 	/*
1093 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1094 	 * to allocate commands space for other rings.
1095 	 */
1096 	if (qid > IWM_MVM_CMD_QUEUE)
1097 		return 0;
1098 
1099 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1100 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1101 	if (error != 0) {
1102 		device_printf(sc->sc_dev,
1103 		    "could not allocate TX cmd DMA memory\n");
1104 		goto fail;
1105 	}
1106 	ring->cmd = ring->cmd_dma.vaddr;
1107 
1108 	/* FW commands may require more mapped space than packets. */
1109 	if (qid == IWM_MVM_CMD_QUEUE) {
1110 		maxsize = IWM_RBUF_SIZE;
1111 		nsegments = 1;
1112 	} else {
1113 		maxsize = MCLBYTES;
1114 		nsegments = IWM_MAX_SCATTER - 2;
1115 	}
1116 
1117 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1118 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1119             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1120 	if (error != 0) {
1121 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1122 		goto fail;
1123 	}
1124 
1125 	paddr = ring->cmd_dma.paddr;
1126 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1127 		struct iwm_tx_data *data = &ring->data[i];
1128 
1129 		data->cmd_paddr = paddr;
1130 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1131 		    + offsetof(struct iwm_tx_cmd, scratch);
1132 		paddr += sizeof(struct iwm_device_cmd);
1133 
1134 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1135 		if (error != 0) {
1136 			device_printf(sc->sc_dev,
1137 			    "could not create TX buf DMA map\n");
1138 			goto fail;
1139 		}
1140 	}
1141 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1142 	    ("invalid physical address"));
1143 	return 0;
1144 
1145 fail:	iwm_free_tx_ring(sc, ring);
1146 	return error;
1147 }
1148 
1149 static void
1150 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1151 {
1152 	int i;
1153 
1154 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1155 		struct iwm_tx_data *data = &ring->data[i];
1156 
1157 		if (data->m != NULL) {
1158 			bus_dmamap_sync(ring->data_dmat, data->map,
1159 			    BUS_DMASYNC_POSTWRITE);
1160 			bus_dmamap_unload(ring->data_dmat, data->map);
1161 			m_freem(data->m);
1162 			data->m = NULL;
1163 		}
1164 	}
1165 	/* Clear TX descriptors. */
1166 	memset(ring->desc, 0, ring->desc_dma.size);
1167 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1168 	    BUS_DMASYNC_PREWRITE);
1169 	sc->qfullmsk &= ~(1 << ring->qid);
1170 	ring->queued = 0;
1171 	ring->cur = 0;
1172 
1173 	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1174 		iwm_pcie_clear_cmd_in_flight(sc);
1175 }
1176 
1177 static void
1178 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1179 {
1180 	int i;
1181 
1182 	iwm_dma_contig_free(&ring->desc_dma);
1183 	iwm_dma_contig_free(&ring->cmd_dma);
1184 
1185 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1186 		struct iwm_tx_data *data = &ring->data[i];
1187 
1188 		if (data->m != NULL) {
1189 			bus_dmamap_sync(ring->data_dmat, data->map,
1190 			    BUS_DMASYNC_POSTWRITE);
1191 			bus_dmamap_unload(ring->data_dmat, data->map);
1192 			m_freem(data->m);
1193 			data->m = NULL;
1194 		}
1195 		if (data->map != NULL) {
1196 			bus_dmamap_destroy(ring->data_dmat, data->map);
1197 			data->map = NULL;
1198 		}
1199 	}
1200 	if (ring->data_dmat != NULL) {
1201 		bus_dma_tag_destroy(ring->data_dmat);
1202 		ring->data_dmat = NULL;
1203 	}
1204 }
1205 
1206 /*
1207  * High-level hardware frobbing routines
1208  */
1209 
1210 static void
1211 iwm_enable_interrupts(struct iwm_softc *sc)
1212 {
1213 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1214 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1215 }
1216 
1217 static void
1218 iwm_restore_interrupts(struct iwm_softc *sc)
1219 {
1220 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1221 }
1222 
1223 static void
1224 iwm_disable_interrupts(struct iwm_softc *sc)
1225 {
1226 	/* disable interrupts */
1227 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1228 
1229 	/* acknowledge all interrupts */
1230 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1231 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1232 }
1233 
1234 static void
1235 iwm_ict_reset(struct iwm_softc *sc)
1236 {
1237 	iwm_disable_interrupts(sc);
1238 
1239 	/* Reset ICT table. */
1240 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1241 	sc->ict_cur = 0;
1242 
1243 	/* Set physical address of ICT table (4KB aligned). */
1244 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1245 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1246 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1247 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1248 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1249 
1250 	/* Switch to ICT interrupt mode in driver. */
1251 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1252 
1253 	/* Re-enable interrupts. */
1254 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1255 	iwm_enable_interrupts(sc);
1256 }
1257 
1258 /* iwlwifi pcie/trans.c */
1259 
1260 /*
1261  * Since this .. hard-resets things, it's time to actually
1262  * mark the first vap (if any) as having no mac context.
1263  * It's annoying, but since the driver is potentially being
1264  * stop/start'ed whilst active (thanks openbsd port!) we
1265  * have to correctly track this.
1266  */
1267 static void
1268 iwm_stop_device(struct iwm_softc *sc)
1269 {
1270 	struct ieee80211com *ic = &sc->sc_ic;
1271 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1272 	int chnl, qid;
1273 	uint32_t mask = 0;
1274 
1275 	/* tell the device to stop sending interrupts */
1276 	iwm_disable_interrupts(sc);
1277 
1278 	/*
1279 	 * FreeBSD-local: mark the first vap as not-uploaded,
1280 	 * so the next transition through auth/assoc
1281 	 * will correctly populate the MAC context.
1282 	 */
1283 	if (vap) {
1284 		struct iwm_vap *iv = IWM_VAP(vap);
1285 		iv->phy_ctxt = NULL;
1286 		iv->is_uploaded = 0;
1287 	}
1288 
1289 	/* device going down, Stop using ICT table */
1290 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1291 
1292 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1293 
1294 	if (iwm_nic_lock(sc)) {
1295 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1296 
1297 		/* Stop each Tx DMA channel */
1298 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1299 			IWM_WRITE(sc,
1300 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1301 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1302 		}
1303 
1304 		/* Wait for DMA channels to be idle */
1305 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1306 		    5000)) {
1307 			device_printf(sc->sc_dev,
1308 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1309 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1310 		}
1311 		iwm_nic_unlock(sc);
1312 	}
1313 	iwm_pcie_rx_stop(sc);
1314 
1315 	/* Stop RX ring. */
1316 	iwm_reset_rx_ring(sc, &sc->rxq);
1317 
1318 	/* Reset all TX rings. */
1319 	for (qid = 0; qid < nitems(sc->txq); qid++)
1320 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1321 
1322 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1323 		/* Power-down device's busmaster DMA clocks */
1324 		if (iwm_nic_lock(sc)) {
1325 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1326 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1327 			iwm_nic_unlock(sc);
1328 		}
1329 		DELAY(5);
1330 	}
1331 
1332 	/* Make sure (redundant) we've released our request to stay awake */
1333 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1334 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1335 
1336 	/* Stop the device, and put it in low power state */
1337 	iwm_apm_stop(sc);
1338 
1339 	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1340 	 * Clean again the interrupt here
1341 	 */
1342 	iwm_disable_interrupts(sc);
1343 	/* stop and reset the on-board processor */
1344 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1345 
1346 	/*
1347 	 * Even if we stop the HW, we still want the RF kill
1348 	 * interrupt
1349 	 */
1350 	iwm_enable_rfkill_int(sc);
1351 	iwm_check_rfkill(sc);
1352 }
1353 
1354 /* iwlwifi: mvm/ops.c */
1355 static void
1356 iwm_mvm_nic_config(struct iwm_softc *sc)
1357 {
1358 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1359 	uint32_t reg_val = 0;
1360 	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1361 
1362 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1363 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1364 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1365 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1366 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1367 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1368 
1369 	/* SKU control */
1370 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1371 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1372 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1373 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1374 
1375 	/* radio configuration */
1376 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1377 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1378 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1379 
1380 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1381 
1382 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1383 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1384 	    radio_cfg_step, radio_cfg_dash);
1385 
1386 	/*
1387 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1388 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1389 	 * to lose ownership and not being able to obtain it back.
1390 	 */
1391 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1392 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1393 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1394 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1395 	}
1396 }
1397 
1398 static int
1399 iwm_nic_rx_init(struct iwm_softc *sc)
1400 {
1401 	/*
1402 	 * Initialize RX ring.  This is from the iwn driver.
1403 	 */
1404 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1405 
1406 	/* Stop Rx DMA */
1407 	iwm_pcie_rx_stop(sc);
1408 
1409 	if (!iwm_nic_lock(sc))
1410 		return EBUSY;
1411 
1412 	/* reset and flush pointers */
1413 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1414 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1415 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1416 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1417 
1418 	/* Set physical address of RX ring (256-byte aligned). */
1419 	IWM_WRITE(sc,
1420 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1421 
1422 	/* Set physical address of RX status (16-byte aligned). */
1423 	IWM_WRITE(sc,
1424 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1425 
1426 	/* Enable Rx DMA
1427 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1428 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1429 	 *      the credit mechanism in 5000 HW RX FIFO
1430 	 * Direct rx interrupts to hosts
1431 	 * Rx buffer size 4 or 8k or 12k
1432 	 * RB timeout 0x10
1433 	 * 256 RBDs
1434 	 */
1435 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1436 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1437 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1438 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1439 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1440 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1441 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1442 
1443 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1444 
1445 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1446 	if (sc->cfg->host_interrupt_operation_mode)
1447 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1448 
1449 	/*
1450 	 * Thus sayeth el jefe (iwlwifi) via a comment:
1451 	 *
1452 	 * This value should initially be 0 (before preparing any
1453 	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1454 	 */
1455 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1456 
1457 	iwm_nic_unlock(sc);
1458 
1459 	return 0;
1460 }
1461 
1462 static int
1463 iwm_nic_tx_init(struct iwm_softc *sc)
1464 {
1465 	int qid;
1466 
1467 	if (!iwm_nic_lock(sc))
1468 		return EBUSY;
1469 
1470 	/* Deactivate TX scheduler. */
1471 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1472 
1473 	/* Set physical address of "keep warm" page (16-byte aligned). */
1474 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1475 
1476 	/* Initialize TX rings. */
1477 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1478 		struct iwm_tx_ring *txq = &sc->txq[qid];
1479 
1480 		/* Set physical address of TX ring (256-byte aligned). */
1481 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1482 		    txq->desc_dma.paddr >> 8);
1483 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1484 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1485 		    __func__,
1486 		    qid, txq->desc,
1487 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1488 	}
1489 
1490 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1491 
1492 	iwm_nic_unlock(sc);
1493 
1494 	return 0;
1495 }
1496 
1497 static int
1498 iwm_nic_init(struct iwm_softc *sc)
1499 {
1500 	int error;
1501 
1502 	iwm_apm_init(sc);
1503 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1504 		iwm_set_pwr(sc);
1505 
1506 	iwm_mvm_nic_config(sc);
1507 
1508 	if ((error = iwm_nic_rx_init(sc)) != 0)
1509 		return error;
1510 
1511 	/*
1512 	 * Ditto for TX, from iwn
1513 	 */
1514 	if ((error = iwm_nic_tx_init(sc)) != 0)
1515 		return error;
1516 
1517 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1518 	    "%s: shadow registers enabled\n", __func__);
1519 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1520 
1521 	return 0;
1522 }
1523 
1524 int
1525 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1526 {
1527 	if (!iwm_nic_lock(sc)) {
1528 		device_printf(sc->sc_dev,
1529 		    "%s: cannot enable txq %d\n",
1530 		    __func__,
1531 		    qid);
1532 		return EBUSY;
1533 	}
1534 
1535 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1536 
1537 	if (qid == IWM_MVM_CMD_QUEUE) {
1538 		/* unactivate before configuration */
1539 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1540 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1541 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1542 
1543 		iwm_nic_unlock(sc);
1544 
1545 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1546 
1547 		if (!iwm_nic_lock(sc)) {
1548 			device_printf(sc->sc_dev,
1549 			    "%s: cannot enable txq %d\n", __func__, qid);
1550 			return EBUSY;
1551 		}
1552 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1553 		iwm_nic_unlock(sc);
1554 
1555 		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1556 		/* Set scheduler window size and frame limit. */
1557 		iwm_write_mem32(sc,
1558 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1559 		    sizeof(uint32_t),
1560 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1561 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1562 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1563 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1564 
1565 		if (!iwm_nic_lock(sc)) {
1566 			device_printf(sc->sc_dev,
1567 			    "%s: cannot enable txq %d\n", __func__, qid);
1568 			return EBUSY;
1569 		}
1570 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1571 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1572 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1573 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1574 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1575 	} else {
1576 		struct iwm_scd_txq_cfg_cmd cmd;
1577 		int error;
1578 
1579 		iwm_nic_unlock(sc);
1580 
1581 		memset(&cmd, 0, sizeof(cmd));
1582 		cmd.scd_queue = qid;
1583 		cmd.enable = 1;
1584 		cmd.sta_id = sta_id;
1585 		cmd.tx_fifo = fifo;
1586 		cmd.aggregate = 0;
1587 		cmd.window = IWM_FRAME_LIMIT;
1588 
1589 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1590 		    sizeof(cmd), &cmd);
1591 		if (error) {
1592 			device_printf(sc->sc_dev,
1593 			    "cannot enable txq %d\n", qid);
1594 			return error;
1595 		}
1596 
1597 		if (!iwm_nic_lock(sc))
1598 			return EBUSY;
1599 	}
1600 
1601 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1602 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1603 
1604 	iwm_nic_unlock(sc);
1605 
1606 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1607 	    __func__, qid, fifo);
1608 
1609 	return 0;
1610 }
1611 
1612 static int
1613 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1614 {
1615 	int error, chnl;
1616 
1617 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1618 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1619 
1620 	if (!iwm_nic_lock(sc))
1621 		return EBUSY;
1622 
1623 	iwm_ict_reset(sc);
1624 
1625 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1626 	if (scd_base_addr != 0 &&
1627 	    scd_base_addr != sc->scd_base_addr) {
1628 		device_printf(sc->sc_dev,
1629 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1630 		    __func__, sc->scd_base_addr, scd_base_addr);
1631 	}
1632 
1633 	iwm_nic_unlock(sc);
1634 
1635 	/* reset context data, TX status and translation data */
1636 	error = iwm_write_mem(sc,
1637 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1638 	    NULL, clear_dwords);
1639 	if (error)
1640 		return EBUSY;
1641 
1642 	if (!iwm_nic_lock(sc))
1643 		return EBUSY;
1644 
1645 	/* Set physical address of TX scheduler rings (1KB aligned). */
1646 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1647 
1648 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1649 
1650 	iwm_nic_unlock(sc);
1651 
1652 	/* enable command channel */
1653 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1654 	if (error)
1655 		return error;
1656 
1657 	if (!iwm_nic_lock(sc))
1658 		return EBUSY;
1659 
1660 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1661 
1662 	/* Enable DMA channels. */
1663 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1664 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1665 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1666 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1667 	}
1668 
1669 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1670 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1671 
1672 	iwm_nic_unlock(sc);
1673 
1674 	/* Enable L1-Active */
1675 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1676 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1677 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1678 	}
1679 
1680 	return error;
1681 }
1682 
1683 /*
1684  * NVM read access and content parsing.  We do not support
1685  * external NVM or writing NVM.
1686  * iwlwifi/mvm/nvm.c
1687  */
1688 
1689 /* Default NVM size to read */
1690 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1691 
1692 #define IWM_NVM_WRITE_OPCODE 1
1693 #define IWM_NVM_READ_OPCODE 0
1694 
1695 /* load nvm chunk response */
1696 enum {
1697 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1698 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1699 };
1700 
1701 static int
1702 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1703 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1704 {
1705 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1706 		.offset = htole16(offset),
1707 		.length = htole16(length),
1708 		.type = htole16(section),
1709 		.op_code = IWM_NVM_READ_OPCODE,
1710 	};
1711 	struct iwm_nvm_access_resp *nvm_resp;
1712 	struct iwm_rx_packet *pkt;
1713 	struct iwm_host_cmd cmd = {
1714 		.id = IWM_NVM_ACCESS_CMD,
1715 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1716 		.data = { &nvm_access_cmd, },
1717 	};
1718 	int ret, bytes_read, offset_read;
1719 	uint8_t *resp_data;
1720 
1721 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1722 
1723 	ret = iwm_send_cmd(sc, &cmd);
1724 	if (ret) {
1725 		device_printf(sc->sc_dev,
1726 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1727 		return ret;
1728 	}
1729 
1730 	pkt = cmd.resp_pkt;
1731 
1732 	/* Extract NVM response */
1733 	nvm_resp = (void *)pkt->data;
1734 	ret = le16toh(nvm_resp->status);
1735 	bytes_read = le16toh(nvm_resp->length);
1736 	offset_read = le16toh(nvm_resp->offset);
1737 	resp_data = nvm_resp->data;
1738 	if (ret) {
1739 		if ((offset != 0) &&
1740 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1741 			/*
1742 			 * meaning of NOT_VALID_ADDRESS:
1743 			 * driver try to read chunk from address that is
1744 			 * multiple of 2K and got an error since addr is empty.
1745 			 * meaning of (offset != 0): driver already
1746 			 * read valid data from another chunk so this case
1747 			 * is not an error.
1748 			 */
1749 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1750 				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1751 				    offset);
1752 			*len = 0;
1753 			ret = 0;
1754 		} else {
1755 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1756 				    "NVM access command failed with status %d\n", ret);
1757 			ret = EIO;
1758 		}
1759 		goto exit;
1760 	}
1761 
1762 	if (offset_read != offset) {
1763 		device_printf(sc->sc_dev,
1764 		    "NVM ACCESS response with invalid offset %d\n",
1765 		    offset_read);
1766 		ret = EINVAL;
1767 		goto exit;
1768 	}
1769 
1770 	if (bytes_read > length) {
1771 		device_printf(sc->sc_dev,
1772 		    "NVM ACCESS response with too much data "
1773 		    "(%d bytes requested, %d bytes received)\n",
1774 		    length, bytes_read);
1775 		ret = EINVAL;
1776 		goto exit;
1777 	}
1778 
1779 	/* Write data to NVM */
1780 	memcpy(data + offset, resp_data, bytes_read);
1781 	*len = bytes_read;
1782 
1783  exit:
1784 	iwm_free_resp(sc, &cmd);
1785 	return ret;
1786 }
1787 
1788 /*
1789  * Reads an NVM section completely.
1790  * NICs prior to 7000 family don't have a real NVM, but just read
1791  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1792  * by uCode, we need to manually check in this case that we don't
1793  * overflow and try to read more than the EEPROM size.
1794  * For 7000 family NICs, we supply the maximal size we can read, and
1795  * the uCode fills the response with as much data as we can,
1796  * without overflowing, so no check is needed.
1797  */
1798 static int
1799 iwm_nvm_read_section(struct iwm_softc *sc,
1800 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1801 {
1802 	uint16_t seglen, length, offset = 0;
1803 	int ret;
1804 
1805 	/* Set nvm section read length */
1806 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1807 
1808 	seglen = length;
1809 
1810 	/* Read the NVM until exhausted (reading less than requested) */
1811 	while (seglen == length) {
1812 		/* Check no memory assumptions fail and cause an overflow */
1813 		if ((size_read + offset + length) >
1814 		    sc->cfg->eeprom_size) {
1815 			device_printf(sc->sc_dev,
1816 			    "EEPROM size is too small for NVM\n");
1817 			return ENOBUFS;
1818 		}
1819 
1820 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1821 		if (ret) {
1822 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1823 				    "Cannot read NVM from section %d offset %d, length %d\n",
1824 				    section, offset, length);
1825 			return ret;
1826 		}
1827 		offset += seglen;
1828 	}
1829 
1830 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 		    "NVM section %d read completed\n", section);
1832 	*len = offset;
1833 	return 0;
1834 }
1835 
1836 /*
1837  * BEGIN IWM_NVM_PARSE
1838  */
1839 
1840 /* iwlwifi/iwl-nvm-parse.c */
1841 
1842 /* NVM offsets (in words) definitions */
1843 enum iwm_nvm_offsets {
1844 	/* NVM HW-Section offset (in words) definitions */
1845 	IWM_HW_ADDR = 0x15,
1846 
1847 /* NVM SW-Section offset (in words) definitions */
1848 	IWM_NVM_SW_SECTION = 0x1C0,
1849 	IWM_NVM_VERSION = 0,
1850 	IWM_RADIO_CFG = 1,
1851 	IWM_SKU = 2,
1852 	IWM_N_HW_ADDRS = 3,
1853 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1854 
1855 /* NVM calibration section offset (in words) definitions */
1856 	IWM_NVM_CALIB_SECTION = 0x2B8,
1857 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1858 };
1859 
1860 enum iwm_8000_nvm_offsets {
1861 	/* NVM HW-Section offset (in words) definitions */
1862 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1863 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1864 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1865 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1866 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1867 
1868 	/* NVM SW-Section offset (in words) definitions */
1869 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1870 	IWM_NVM_VERSION_8000 = 0,
1871 	IWM_RADIO_CFG_8000 = 0,
1872 	IWM_SKU_8000 = 2,
1873 	IWM_N_HW_ADDRS_8000 = 3,
1874 
1875 	/* NVM REGULATORY -Section offset (in words) definitions */
1876 	IWM_NVM_CHANNELS_8000 = 0,
1877 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1878 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1879 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1880 
1881 	/* NVM calibration section offset (in words) definitions */
1882 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1883 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1884 };
1885 
1886 /* SKU Capabilities (actual values from NVM definition) */
1887 enum nvm_sku_bits {
1888 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1889 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1890 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1891 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1892 };
1893 
1894 /* radio config bits (actual values from NVM definition) */
1895 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1896 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1897 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1898 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1899 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1900 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1901 
1902 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1903 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1904 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1905 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1906 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1907 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1908 
1909 #define DEFAULT_MAX_TX_POWER 16
1910 
1911 /**
1912  * enum iwm_nvm_channel_flags - channel flags in NVM
1913  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1914  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1915  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1916  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1917  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1918  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1919  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1920  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1921  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1922  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1923  */
1924 enum iwm_nvm_channel_flags {
1925 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1926 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1927 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1928 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1929 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1930 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1931 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1932 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1933 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1934 };
1935 
1936 /*
1937  * Translate EEPROM flags to net80211.
1938  */
1939 static uint32_t
1940 iwm_eeprom_channel_flags(uint16_t ch_flags)
1941 {
1942 	uint32_t nflags;
1943 
1944 	nflags = 0;
1945 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1946 		nflags |= IEEE80211_CHAN_PASSIVE;
1947 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1948 		nflags |= IEEE80211_CHAN_NOADHOC;
1949 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1950 		nflags |= IEEE80211_CHAN_DFS;
1951 		/* Just in case. */
1952 		nflags |= IEEE80211_CHAN_NOADHOC;
1953 	}
1954 
1955 	return (nflags);
1956 }
1957 
1958 static void
1959 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1960     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1961     const uint8_t bands[])
1962 {
1963 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1964 	uint32_t nflags;
1965 	uint16_t ch_flags;
1966 	uint8_t ieee;
1967 	int error;
1968 
1969 	for (; ch_idx < ch_num; ch_idx++) {
1970 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1971 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1972 			ieee = iwm_nvm_channels[ch_idx];
1973 		else
1974 			ieee = iwm_nvm_channels_8000[ch_idx];
1975 
1976 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1977 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1978 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1979 			    ieee, ch_flags,
1980 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1981 			    "5.2" : "2.4");
1982 			continue;
1983 		}
1984 
1985 		nflags = iwm_eeprom_channel_flags(ch_flags);
1986 		error = ieee80211_add_channel(chans, maxchans, nchans,
1987 		    ieee, 0, 0, nflags, bands);
1988 		if (error != 0)
1989 			break;
1990 
1991 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1992 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1993 		    ieee, ch_flags,
1994 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1995 		    "5.2" : "2.4");
1996 	}
1997 }
1998 
1999 static void
2000 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2001     struct ieee80211_channel chans[])
2002 {
2003 	struct iwm_softc *sc = ic->ic_softc;
2004 	struct iwm_nvm_data *data = sc->nvm_data;
2005 	uint8_t bands[IEEE80211_MODE_BYTES];
2006 	size_t ch_num;
2007 
2008 	memset(bands, 0, sizeof(bands));
2009 	/* 1-13: 11b/g channels. */
2010 	setbit(bands, IEEE80211_MODE_11B);
2011 	setbit(bands, IEEE80211_MODE_11G);
2012 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2013 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2014 
2015 	/* 14: 11b channel only. */
2016 	clrbit(bands, IEEE80211_MODE_11G);
2017 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2018 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2019 
2020 	if (data->sku_cap_band_52GHz_enable) {
2021 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2022 			ch_num = nitems(iwm_nvm_channels);
2023 		else
2024 			ch_num = nitems(iwm_nvm_channels_8000);
2025 		memset(bands, 0, sizeof(bands));
2026 		setbit(bands, IEEE80211_MODE_11A);
2027 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2028 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2029 	}
2030 }
2031 
2032 static void
2033 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2034 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2035 {
2036 	const uint8_t *hw_addr;
2037 
2038 	if (mac_override) {
2039 		static const uint8_t reserved_mac[] = {
2040 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2041 		};
2042 
2043 		hw_addr = (const uint8_t *)(mac_override +
2044 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2045 
2046 		/*
2047 		 * Store the MAC address from MAO section.
2048 		 * No byte swapping is required in MAO section
2049 		 */
2050 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2051 
2052 		/*
2053 		 * Force the use of the OTP MAC address in case of reserved MAC
2054 		 * address in the NVM, or if address is given but invalid.
2055 		 */
2056 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2057 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2058 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2059 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2060 			return;
2061 
2062 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2063 		    "%s: mac address from nvm override section invalid\n",
2064 		    __func__);
2065 	}
2066 
2067 	if (nvm_hw) {
2068 		/* read the mac address from WFMP registers */
2069 		uint32_t mac_addr0 =
2070 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2071 		uint32_t mac_addr1 =
2072 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2073 
2074 		hw_addr = (const uint8_t *)&mac_addr0;
2075 		data->hw_addr[0] = hw_addr[3];
2076 		data->hw_addr[1] = hw_addr[2];
2077 		data->hw_addr[2] = hw_addr[1];
2078 		data->hw_addr[3] = hw_addr[0];
2079 
2080 		hw_addr = (const uint8_t *)&mac_addr1;
2081 		data->hw_addr[4] = hw_addr[1];
2082 		data->hw_addr[5] = hw_addr[0];
2083 
2084 		return;
2085 	}
2086 
2087 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2088 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2089 }
2090 
2091 static int
2092 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2093 	    const uint16_t *phy_sku)
2094 {
2095 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2096 		return le16_to_cpup(nvm_sw + IWM_SKU);
2097 
2098 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2099 }
2100 
2101 static int
2102 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2103 {
2104 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2105 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2106 	else
2107 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2108 						IWM_NVM_VERSION_8000));
2109 }
2110 
2111 static int
2112 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2113 		  const uint16_t *phy_sku)
2114 {
2115         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2116                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2117 
2118         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2119 }
2120 
2121 static int
2122 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2123 {
2124 	int n_hw_addr;
2125 
2126 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2128 
2129 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2130 
2131         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2132 }
2133 
2134 static void
2135 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2136 		  uint32_t radio_cfg)
2137 {
2138 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2139 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2140 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2141 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2142 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2143 		return;
2144 	}
2145 
2146 	/* set the radio configuration for family 8000 */
2147 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2148 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2149 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2150 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2151 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2152 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2153 }
2154 
2155 static int
2156 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2157 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2158 {
2159 #ifdef notyet /* for FAMILY 9000 */
2160 	if (cfg->mac_addr_from_csr) {
2161 		iwm_set_hw_address_from_csr(sc, data);
2162         } else
2163 #endif
2164 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2165 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2166 
2167 		/* The byte order is little endian 16 bit, meaning 214365 */
2168 		data->hw_addr[0] = hw_addr[1];
2169 		data->hw_addr[1] = hw_addr[0];
2170 		data->hw_addr[2] = hw_addr[3];
2171 		data->hw_addr[3] = hw_addr[2];
2172 		data->hw_addr[4] = hw_addr[5];
2173 		data->hw_addr[5] = hw_addr[4];
2174 	} else {
2175 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2176 	}
2177 
2178 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2179 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2180 		return EINVAL;
2181 	}
2182 
2183 	return 0;
2184 }
2185 
2186 static struct iwm_nvm_data *
2187 iwm_parse_nvm_data(struct iwm_softc *sc,
2188 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2189 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2190 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2191 {
2192 	struct iwm_nvm_data *data;
2193 	uint32_t sku, radio_cfg;
2194 	uint16_t lar_config;
2195 
2196 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2197 		data = malloc(sizeof(*data) +
2198 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2199 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2200 	} else {
2201 		data = malloc(sizeof(*data) +
2202 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2203 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2204 	}
2205 	if (!data)
2206 		return NULL;
2207 
2208 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2209 
2210 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2211 	iwm_set_radio_cfg(sc, data, radio_cfg);
2212 
2213 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2214 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2215 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2216 	data->sku_cap_11n_enable = 0;
2217 
2218 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2219 
2220 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2221 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2222 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2223 				       IWM_NVM_LAR_OFFSET_8000;
2224 
2225 		lar_config = le16_to_cpup(regulatory + lar_offset);
2226 		data->lar_enabled = !!(lar_config &
2227 				       IWM_NVM_LAR_ENABLED_8000);
2228 	}
2229 
2230 	/* If no valid mac address was found - bail out */
2231 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2232 		free(data, M_DEVBUF);
2233 		return NULL;
2234 	}
2235 
2236 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2237 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2238 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2239 	} else {
2240 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2241 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2242 	}
2243 
2244 	return data;
2245 }
2246 
2247 static void
2248 iwm_free_nvm_data(struct iwm_nvm_data *data)
2249 {
2250 	if (data != NULL)
2251 		free(data, M_DEVBUF);
2252 }
2253 
2254 static struct iwm_nvm_data *
2255 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2256 {
2257 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2258 
2259 	/* Checking for required sections */
2260 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2261 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2262 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2263 			device_printf(sc->sc_dev,
2264 			    "Can't parse empty OTP/NVM sections\n");
2265 			return NULL;
2266 		}
2267 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2268 		/* SW and REGULATORY sections are mandatory */
2269 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2270 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2271 			device_printf(sc->sc_dev,
2272 			    "Can't parse empty OTP/NVM sections\n");
2273 			return NULL;
2274 		}
2275 		/* MAC_OVERRIDE or at least HW section must exist */
2276 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2277 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2278 			device_printf(sc->sc_dev,
2279 			    "Can't parse mac_address, empty sections\n");
2280 			return NULL;
2281 		}
2282 
2283 		/* PHY_SKU section is mandatory in B0 */
2284 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2285 			device_printf(sc->sc_dev,
2286 			    "Can't parse phy_sku in B0, empty sections\n");
2287 			return NULL;
2288 		}
2289 	} else {
2290 		panic("unknown device family %d\n", sc->cfg->device_family);
2291 	}
2292 
2293 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2294 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2295 	calib = (const uint16_t *)
2296 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2297 	regulatory = (const uint16_t *)
2298 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2299 	mac_override = (const uint16_t *)
2300 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2301 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2302 
2303 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2304 	    phy_sku, regulatory);
2305 }
2306 
2307 static int
2308 iwm_nvm_init(struct iwm_softc *sc)
2309 {
2310 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2311 	int i, ret, section;
2312 	uint32_t size_read = 0;
2313 	uint8_t *nvm_buffer, *temp;
2314 	uint16_t len;
2315 
2316 	memset(nvm_sections, 0, sizeof(nvm_sections));
2317 
2318 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2319 		return EINVAL;
2320 
2321 	/* load NVM values from nic */
2322 	/* Read From FW NVM */
2323 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2324 
2325 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2326 	if (!nvm_buffer)
2327 		return ENOMEM;
2328 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2329 		/* we override the constness for initial read */
2330 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2331 					   &len, size_read);
2332 		if (ret)
2333 			continue;
2334 		size_read += len;
2335 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2336 		if (!temp) {
2337 			ret = ENOMEM;
2338 			break;
2339 		}
2340 		memcpy(temp, nvm_buffer, len);
2341 
2342 		nvm_sections[section].data = temp;
2343 		nvm_sections[section].length = len;
2344 	}
2345 	if (!size_read)
2346 		device_printf(sc->sc_dev, "OTP is blank\n");
2347 	free(nvm_buffer, M_DEVBUF);
2348 
2349 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2350 	if (!sc->nvm_data)
2351 		return EINVAL;
2352 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2353 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2354 
2355 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2356 		if (nvm_sections[i].data != NULL)
2357 			free(nvm_sections[i].data, M_DEVBUF);
2358 	}
2359 
2360 	return 0;
2361 }
2362 
2363 static int
2364 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2365 	const struct iwm_fw_desc *section)
2366 {
2367 	struct iwm_dma_info *dma = &sc->fw_dma;
2368 	uint8_t *v_addr;
2369 	bus_addr_t p_addr;
2370 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2371 	int ret = 0;
2372 
2373 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2374 		    "%s: [%d] uCode section being loaded...\n",
2375 		    __func__, section_num);
2376 
2377 	v_addr = dma->vaddr;
2378 	p_addr = dma->paddr;
2379 
2380 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2381 		uint32_t copy_size, dst_addr;
2382 		int extended_addr = FALSE;
2383 
2384 		copy_size = MIN(chunk_sz, section->len - offset);
2385 		dst_addr = section->offset + offset;
2386 
2387 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2388 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2389 			extended_addr = TRUE;
2390 
2391 		if (extended_addr)
2392 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2393 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2394 
2395 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2396 		    copy_size);
2397 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2398 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2399 						   copy_size);
2400 
2401 		if (extended_addr)
2402 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2403 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2404 
2405 		if (ret) {
2406 			device_printf(sc->sc_dev,
2407 			    "%s: Could not load the [%d] uCode section\n",
2408 			    __func__, section_num);
2409 			break;
2410 		}
2411 	}
2412 
2413 	return ret;
2414 }
2415 
2416 /*
2417  * ucode
2418  */
2419 static int
2420 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2421 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2422 {
2423 	int ret;
2424 
2425 	sc->sc_fw_chunk_done = 0;
2426 
2427 	if (!iwm_nic_lock(sc))
2428 		return EBUSY;
2429 
2430 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2431 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2432 
2433 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2434 	    dst_addr);
2435 
2436 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2437 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2438 
2439 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2440 	    (iwm_get_dma_hi_addr(phy_addr)
2441 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2442 
2443 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2444 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2445 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2446 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2447 
2448 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2449 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2450 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2451 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2452 
2453 	iwm_nic_unlock(sc);
2454 
2455 	/* wait up to 5s for this segment to load */
2456 	ret = 0;
2457 	while (!sc->sc_fw_chunk_done) {
2458 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2459 		if (ret)
2460 			break;
2461 	}
2462 
2463 	if (ret != 0) {
2464 		device_printf(sc->sc_dev,
2465 		    "fw chunk addr 0x%x len %d failed to load\n",
2466 		    dst_addr, byte_cnt);
2467 		return ETIMEDOUT;
2468 	}
2469 
2470 	return 0;
2471 }
2472 
2473 static int
2474 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2475 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2476 {
2477 	int shift_param;
2478 	int i, ret = 0, sec_num = 0x1;
2479 	uint32_t val, last_read_idx = 0;
2480 
2481 	if (cpu == 1) {
2482 		shift_param = 0;
2483 		*first_ucode_section = 0;
2484 	} else {
2485 		shift_param = 16;
2486 		(*first_ucode_section)++;
2487 	}
2488 
2489 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2490 		last_read_idx = i;
2491 
2492 		/*
2493 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2494 		 * CPU1 to CPU2.
2495 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2496 		 * CPU2 non paged to CPU2 paging sec.
2497 		 */
2498 		if (!image->fw_sect[i].data ||
2499 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2500 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2501 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2502 				    "Break since Data not valid or Empty section, sec = %d\n",
2503 				    i);
2504 			break;
2505 		}
2506 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2507 		if (ret)
2508 			return ret;
2509 
2510 		/* Notify the ucode of the loaded section number and status */
2511 		if (iwm_nic_lock(sc)) {
2512 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2513 			val = val | (sec_num << shift_param);
2514 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2515 			sec_num = (sec_num << 1) | 0x1;
2516 			iwm_nic_unlock(sc);
2517 		}
2518 	}
2519 
2520 	*first_ucode_section = last_read_idx;
2521 
2522 	iwm_enable_interrupts(sc);
2523 
2524 	if (iwm_nic_lock(sc)) {
2525 		if (cpu == 1)
2526 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2527 		else
2528 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2529 		iwm_nic_unlock(sc);
2530 	}
2531 
2532 	return 0;
2533 }
2534 
2535 static int
2536 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2537 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2538 {
2539 	int shift_param;
2540 	int i, ret = 0;
2541 	uint32_t last_read_idx = 0;
2542 
2543 	if (cpu == 1) {
2544 		shift_param = 0;
2545 		*first_ucode_section = 0;
2546 	} else {
2547 		shift_param = 16;
2548 		(*first_ucode_section)++;
2549 	}
2550 
2551 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2552 		last_read_idx = i;
2553 
2554 		/*
2555 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2556 		 * CPU1 to CPU2.
2557 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2558 		 * CPU2 non paged to CPU2 paging sec.
2559 		 */
2560 		if (!image->fw_sect[i].data ||
2561 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2562 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2563 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2564 				    "Break since Data not valid or Empty section, sec = %d\n",
2565 				     i);
2566 			break;
2567 		}
2568 
2569 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2570 		if (ret)
2571 			return ret;
2572 	}
2573 
2574 	*first_ucode_section = last_read_idx;
2575 
2576 	return 0;
2577 
2578 }
2579 
2580 static int
2581 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2582 	const struct iwm_fw_sects *image)
2583 {
2584 	int ret = 0;
2585 	int first_ucode_section;
2586 
2587 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2588 		     image->is_dual_cpus ? "Dual" : "Single");
2589 
2590 	/* load to FW the binary non secured sections of CPU1 */
2591 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2592 	if (ret)
2593 		return ret;
2594 
2595 	if (image->is_dual_cpus) {
2596 		/* set CPU2 header address */
2597 		if (iwm_nic_lock(sc)) {
2598 			iwm_write_prph(sc,
2599 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2600 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2601 			iwm_nic_unlock(sc);
2602 		}
2603 
2604 		/* load to FW the binary sections of CPU2 */
2605 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2606 						 &first_ucode_section);
2607 		if (ret)
2608 			return ret;
2609 	}
2610 
2611 	iwm_enable_interrupts(sc);
2612 
2613 	/* release CPU reset */
2614 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2615 
2616 	return 0;
2617 }
2618 
2619 int
2620 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2621 	const struct iwm_fw_sects *image)
2622 {
2623 	int ret = 0;
2624 	int first_ucode_section;
2625 
2626 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2627 		    image->is_dual_cpus ? "Dual" : "Single");
2628 
2629 	/* configure the ucode to be ready to get the secured image */
2630 	/* release CPU reset */
2631 	if (iwm_nic_lock(sc)) {
2632 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2633 		    IWM_RELEASE_CPU_RESET_BIT);
2634 		iwm_nic_unlock(sc);
2635 	}
2636 
2637 	/* load to FW the binary Secured sections of CPU1 */
2638 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2639 	    &first_ucode_section);
2640 	if (ret)
2641 		return ret;
2642 
2643 	/* load to FW the binary sections of CPU2 */
2644 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2645 	    &first_ucode_section);
2646 }
2647 
2648 /* XXX Get rid of this definition */
2649 static inline void
2650 iwm_enable_fw_load_int(struct iwm_softc *sc)
2651 {
2652 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2653 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2654 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2655 }
2656 
2657 /* XXX Add proper rfkill support code */
2658 static int
2659 iwm_start_fw(struct iwm_softc *sc,
2660 	const struct iwm_fw_sects *fw)
2661 {
2662 	int ret;
2663 
2664 	/* This may fail if AMT took ownership of the device */
2665 	if (iwm_prepare_card_hw(sc)) {
2666 		device_printf(sc->sc_dev,
2667 		    "%s: Exit HW not ready\n", __func__);
2668 		ret = EIO;
2669 		goto out;
2670 	}
2671 
2672 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2673 
2674 	iwm_disable_interrupts(sc);
2675 
2676 	/* make sure rfkill handshake bits are cleared */
2677 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2678 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2679 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2680 
2681 	/* clear (again), then enable host interrupts */
2682 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2683 
2684 	ret = iwm_nic_init(sc);
2685 	if (ret) {
2686 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2687 		goto out;
2688 	}
2689 
2690 	/*
2691 	 * Now, we load the firmware and don't want to be interrupted, even
2692 	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2693 	 * FH_TX interrupt which is needed to load the firmware). If the
2694 	 * RF-Kill switch is toggled, we will find out after having loaded
2695 	 * the firmware and return the proper value to the caller.
2696 	 */
2697 	iwm_enable_fw_load_int(sc);
2698 
2699 	/* really make sure rfkill handshake bits are cleared */
2700 	/* maybe we should write a few times more?  just to make sure */
2701 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2703 
2704 	/* Load the given image to the HW */
2705 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2706 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2707 	else
2708 		ret = iwm_pcie_load_given_ucode(sc, fw);
2709 
2710 	/* XXX re-check RF-Kill state */
2711 
2712 out:
2713 	return ret;
2714 }
2715 
2716 static int
2717 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2718 {
2719 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2720 		.valid = htole32(valid_tx_ant),
2721 	};
2722 
2723 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2724 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2725 }
2726 
2727 /* iwlwifi: mvm/fw.c */
2728 static int
2729 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2730 {
2731 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2732 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2733 
2734 	/* Set parameters */
2735 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2736 	phy_cfg_cmd.calib_control.event_trigger =
2737 	    sc->sc_default_calib[ucode_type].event_trigger;
2738 	phy_cfg_cmd.calib_control.flow_trigger =
2739 	    sc->sc_default_calib[ucode_type].flow_trigger;
2740 
2741 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2742 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2743 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2744 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2745 }
2746 
2747 static int
2748 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2749 {
2750 	struct iwm_mvm_alive_data *alive_data = data;
2751 	struct iwm_mvm_alive_resp_ver1 *palive1;
2752 	struct iwm_mvm_alive_resp_ver2 *palive2;
2753 	struct iwm_mvm_alive_resp *palive;
2754 
2755 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2756 		palive1 = (void *)pkt->data;
2757 
2758 		sc->support_umac_log = FALSE;
2759                 sc->error_event_table =
2760                         le32toh(palive1->error_event_table_ptr);
2761                 sc->log_event_table =
2762                         le32toh(palive1->log_event_table_ptr);
2763                 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2764 
2765                 alive_data->valid = le16toh(palive1->status) ==
2766                                     IWM_ALIVE_STATUS_OK;
2767                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2768 			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2769 			     le16toh(palive1->status), palive1->ver_type,
2770                              palive1->ver_subtype, palive1->flags);
2771 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2772 		palive2 = (void *)pkt->data;
2773 		sc->error_event_table =
2774 			le32toh(palive2->error_event_table_ptr);
2775 		sc->log_event_table =
2776 			le32toh(palive2->log_event_table_ptr);
2777 		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2778 		sc->umac_error_event_table =
2779                         le32toh(palive2->error_info_addr);
2780 
2781 		alive_data->valid = le16toh(palive2->status) ==
2782 				    IWM_ALIVE_STATUS_OK;
2783 		if (sc->umac_error_event_table)
2784 			sc->support_umac_log = TRUE;
2785 
2786 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2787 			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2788 			    le16toh(palive2->status), palive2->ver_type,
2789 			    palive2->ver_subtype, palive2->flags);
2790 
2791 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2792 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2793 			    palive2->umac_major, palive2->umac_minor);
2794 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2795 		palive = (void *)pkt->data;
2796 
2797 		sc->error_event_table =
2798 			le32toh(palive->error_event_table_ptr);
2799 		sc->log_event_table =
2800 			le32toh(palive->log_event_table_ptr);
2801 		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2802 		sc->umac_error_event_table =
2803 			le32toh(palive->error_info_addr);
2804 
2805 		alive_data->valid = le16toh(palive->status) ==
2806 				    IWM_ALIVE_STATUS_OK;
2807 		if (sc->umac_error_event_table)
2808 			sc->support_umac_log = TRUE;
2809 
2810 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2811 			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2812 			    le16toh(palive->status), palive->ver_type,
2813 			    palive->ver_subtype, palive->flags);
2814 
2815 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2816 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2817 			    le32toh(palive->umac_major),
2818 			    le32toh(palive->umac_minor));
2819 	}
2820 
2821 	return TRUE;
2822 }
2823 
2824 static int
2825 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2826 	struct iwm_rx_packet *pkt, void *data)
2827 {
2828 	struct iwm_phy_db *phy_db = data;
2829 
2830 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2831 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2832 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2833 			    __func__, pkt->hdr.code);
2834 		}
2835 		return TRUE;
2836 	}
2837 
2838 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2839 		device_printf(sc->sc_dev,
2840 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2841 	}
2842 
2843 	return FALSE;
2844 }
2845 
2846 static int
2847 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2848 	enum iwm_ucode_type ucode_type)
2849 {
2850 	struct iwm_notification_wait alive_wait;
2851 	struct iwm_mvm_alive_data alive_data;
2852 	const struct iwm_fw_sects *fw;
2853 	enum iwm_ucode_type old_type = sc->cur_ucode;
2854 	int error;
2855 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2856 
2857 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2858 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2859 			error);
2860 		return error;
2861 	}
2862 	fw = &sc->sc_fw.fw_sects[ucode_type];
2863 	sc->cur_ucode = ucode_type;
2864 	sc->ucode_loaded = FALSE;
2865 
2866 	memset(&alive_data, 0, sizeof(alive_data));
2867 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2868 				   alive_cmd, nitems(alive_cmd),
2869 				   iwm_alive_fn, &alive_data);
2870 
2871 	error = iwm_start_fw(sc, fw);
2872 	if (error) {
2873 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2874 		sc->cur_ucode = old_type;
2875 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2876 		return error;
2877 	}
2878 
2879 	/*
2880 	 * Some things may run in the background now, but we
2881 	 * just wait for the ALIVE notification here.
2882 	 */
2883 	IWM_UNLOCK(sc);
2884 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2885 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2886 	IWM_LOCK(sc);
2887 	if (error) {
2888 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2889 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2890 			if (iwm_nic_lock(sc)) {
2891 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2892 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2893 				iwm_nic_unlock(sc);
2894 			}
2895 			device_printf(sc->sc_dev,
2896 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2897 			    a, b);
2898 		}
2899 		sc->cur_ucode = old_type;
2900 		return error;
2901 	}
2902 
2903 	if (!alive_data.valid) {
2904 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2905 		    __func__);
2906 		sc->cur_ucode = old_type;
2907 		return EIO;
2908 	}
2909 
2910 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2911 
2912 	/*
2913 	 * configure and operate fw paging mechanism.
2914 	 * driver configures the paging flow only once, CPU2 paging image
2915 	 * included in the IWM_UCODE_INIT image.
2916 	 */
2917 	if (fw->paging_mem_size) {
2918 		error = iwm_save_fw_paging(sc, fw);
2919 		if (error) {
2920 			device_printf(sc->sc_dev,
2921 			    "%s: failed to save the FW paging image\n",
2922 			    __func__);
2923 			return error;
2924 		}
2925 
2926 		error = iwm_send_paging_cmd(sc, fw);
2927 		if (error) {
2928 			device_printf(sc->sc_dev,
2929 			    "%s: failed to send the paging cmd\n", __func__);
2930 			iwm_free_fw_paging(sc);
2931 			return error;
2932 		}
2933 	}
2934 
2935 	if (!error)
2936 		sc->ucode_loaded = TRUE;
2937 	return error;
2938 }
2939 
2940 /*
2941  * mvm misc bits
2942  */
2943 
2944 /*
2945  * follows iwlwifi/fw.c
2946  */
2947 static int
2948 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2949 {
2950 	struct iwm_notification_wait calib_wait;
2951 	static const uint16_t init_complete[] = {
2952 		IWM_INIT_COMPLETE_NOTIF,
2953 		IWM_CALIB_RES_NOTIF_PHY_DB
2954 	};
2955 	int ret;
2956 
2957 	/* do not operate with rfkill switch turned on */
2958 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2959 		device_printf(sc->sc_dev,
2960 		    "radio is disabled by hardware switch\n");
2961 		return EPERM;
2962 	}
2963 
2964 	iwm_init_notification_wait(sc->sc_notif_wait,
2965 				   &calib_wait,
2966 				   init_complete,
2967 				   nitems(init_complete),
2968 				   iwm_wait_phy_db_entry,
2969 				   sc->sc_phy_db);
2970 
2971 	/* Will also start the device */
2972 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2973 	if (ret) {
2974 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2975 		    ret);
2976 		goto error;
2977 	}
2978 
2979 	if (justnvm) {
2980 		/* Read nvm */
2981 		ret = iwm_nvm_init(sc);
2982 		if (ret) {
2983 			device_printf(sc->sc_dev, "failed to read nvm\n");
2984 			goto error;
2985 		}
2986 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2987 		goto error;
2988 	}
2989 
2990 	ret = iwm_send_bt_init_conf(sc);
2991 	if (ret) {
2992 		device_printf(sc->sc_dev,
2993 		    "failed to send bt coex configuration: %d\n", ret);
2994 		goto error;
2995 	}
2996 
2997 	/* Send TX valid antennas before triggering calibrations */
2998 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2999 	if (ret) {
3000 		device_printf(sc->sc_dev,
3001 		    "failed to send antennas before calibration: %d\n", ret);
3002 		goto error;
3003 	}
3004 
3005 	/*
3006 	 * Send phy configurations command to init uCode
3007 	 * to start the 16.0 uCode init image internal calibrations.
3008 	 */
3009 	ret = iwm_send_phy_cfg_cmd(sc);
3010 	if (ret) {
3011 		device_printf(sc->sc_dev,
3012 		    "%s: Failed to run INIT calibrations: %d\n",
3013 		    __func__, ret);
3014 		goto error;
3015 	}
3016 
3017 	/*
3018 	 * Nothing to do but wait for the init complete notification
3019 	 * from the firmware.
3020 	 */
3021 	IWM_UNLOCK(sc);
3022 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3023 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3024 	IWM_LOCK(sc);
3025 
3026 
3027 	goto out;
3028 
3029 error:
3030 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3031 out:
3032 	return ret;
3033 }
3034 
3035 /*
3036  * receive side
3037  */
3038 
3039 /* (re)stock rx ring, called at init-time and at runtime */
3040 static int
3041 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3042 {
3043 	struct iwm_rx_ring *ring = &sc->rxq;
3044 	struct iwm_rx_data *data = &ring->data[idx];
3045 	struct mbuf *m;
3046 	bus_dmamap_t dmamap;
3047 	bus_dma_segment_t seg;
3048 	int nsegs, error;
3049 
3050 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3051 	if (m == NULL)
3052 		return ENOBUFS;
3053 
3054 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3055 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3056 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3057 	if (error != 0) {
3058 		device_printf(sc->sc_dev,
3059 		    "%s: can't map mbuf, error %d\n", __func__, error);
3060 		m_freem(m);
3061 		return error;
3062 	}
3063 
3064 	if (data->m != NULL)
3065 		bus_dmamap_unload(ring->data_dmat, data->map);
3066 
3067 	/* Swap ring->spare_map with data->map */
3068 	dmamap = data->map;
3069 	data->map = ring->spare_map;
3070 	ring->spare_map = dmamap;
3071 
3072 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3073 	data->m = m;
3074 
3075 	/* Update RX descriptor. */
3076 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3077 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3078 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3079 	    BUS_DMASYNC_PREWRITE);
3080 
3081 	return 0;
3082 }
3083 
3084 /* iwlwifi: mvm/rx.c */
3085 /*
3086  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3087  * values are reported by the fw as positive values - need to negate
3088  * to obtain their dBM.  Account for missing antennas by replacing 0
3089  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3090  */
3091 static int
3092 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3093 {
3094 	int energy_a, energy_b, energy_c, max_energy;
3095 	uint32_t val;
3096 
3097 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3098 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3099 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3100 	energy_a = energy_a ? -energy_a : -256;
3101 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3102 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3103 	energy_b = energy_b ? -energy_b : -256;
3104 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3105 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3106 	energy_c = energy_c ? -energy_c : -256;
3107 	max_energy = MAX(energy_a, energy_b);
3108 	max_energy = MAX(max_energy, energy_c);
3109 
3110 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3111 	    "energy In A %d B %d C %d , and max %d\n",
3112 	    energy_a, energy_b, energy_c, max_energy);
3113 
3114 	return max_energy;
3115 }
3116 
3117 static void
3118 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3119 {
3120 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3121 
3122 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3123 
3124 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3125 }
3126 
3127 /*
3128  * Retrieve the average noise (in dBm) among receivers.
3129  */
3130 static int
3131 iwm_get_noise(struct iwm_softc *sc,
3132     const struct iwm_mvm_statistics_rx_non_phy *stats)
3133 {
3134 	int i, total, nbant, noise;
3135 
3136 	total = nbant = noise = 0;
3137 	for (i = 0; i < 3; i++) {
3138 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3139 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3140 		    __func__,
3141 		    i,
3142 		    noise);
3143 
3144 		if (noise) {
3145 			total += noise;
3146 			nbant++;
3147 		}
3148 	}
3149 
3150 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3151 	    __func__, nbant, total);
3152 #if 0
3153 	/* There should be at least one antenna but check anyway. */
3154 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3155 #else
3156 	/* For now, just hard-code it to -96 to be safe */
3157 	return (-96);
3158 #endif
3159 }
3160 
3161 /*
3162  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3163  *
3164  * Handles the actual data of the Rx packet from the fw
3165  */
3166 static boolean_t
3167 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3168 	boolean_t stolen)
3169 {
3170 	struct ieee80211com *ic = &sc->sc_ic;
3171 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3172 	struct ieee80211_frame *wh;
3173 	struct ieee80211_node *ni;
3174 	struct ieee80211_rx_stats rxs;
3175 	struct iwm_rx_phy_info *phy_info;
3176 	struct iwm_rx_mpdu_res_start *rx_res;
3177 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3178 	uint32_t len;
3179 	uint32_t rx_pkt_status;
3180 	int rssi;
3181 
3182 	phy_info = &sc->sc_last_phy_info;
3183 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3184 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3185 	len = le16toh(rx_res->byte_count);
3186 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3187 
3188 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3189 		device_printf(sc->sc_dev,
3190 		    "dsp size out of range [0,20]: %d\n",
3191 		    phy_info->cfg_phy_cnt);
3192 		goto fail;
3193 	}
3194 
3195 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3196 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3197 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3198 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3199 		goto fail;
3200 	}
3201 
3202 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3203 
3204 	/* Map it to relative value */
3205 	rssi = rssi - sc->sc_noise;
3206 
3207 	/* replenish ring for the buffer we're going to feed to the sharks */
3208 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3209 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3210 		    __func__);
3211 		goto fail;
3212 	}
3213 
3214 	m->m_data = pkt->data + sizeof(*rx_res);
3215 	m->m_pkthdr.len = m->m_len = len;
3216 
3217 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3219 
3220 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3221 
3222 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3223 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3224 	    __func__,
3225 	    le16toh(phy_info->channel),
3226 	    le16toh(phy_info->phy_flags));
3227 
3228 	/*
3229 	 * Populate an RX state struct with the provided information.
3230 	 */
3231 	bzero(&rxs, sizeof(rxs));
3232 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3233 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3234 	rxs.c_ieee = le16toh(phy_info->channel);
3235 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3236 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3237 	} else {
3238 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3239 	}
3240 
3241 	/* rssi is in 1/2db units */
3242 	rxs.c_rssi = rssi * 2;
3243 	rxs.c_nf = sc->sc_noise;
3244 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3245 		if (ni)
3246 			ieee80211_free_node(ni);
3247 		goto fail;
3248 	}
3249 
3250 	if (ieee80211_radiotap_active_vap(vap)) {
3251 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3252 
3253 		tap->wr_flags = 0;
3254 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3255 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3256 		tap->wr_chan_freq = htole16(rxs.c_freq);
3257 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3258 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3259 		tap->wr_dbm_antsignal = (int8_t)rssi;
3260 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3261 		tap->wr_tsft = phy_info->system_timestamp;
3262 		switch (phy_info->rate) {
3263 		/* CCK rates. */
3264 		case  10: tap->wr_rate =   2; break;
3265 		case  20: tap->wr_rate =   4; break;
3266 		case  55: tap->wr_rate =  11; break;
3267 		case 110: tap->wr_rate =  22; break;
3268 		/* OFDM rates. */
3269 		case 0xd: tap->wr_rate =  12; break;
3270 		case 0xf: tap->wr_rate =  18; break;
3271 		case 0x5: tap->wr_rate =  24; break;
3272 		case 0x7: tap->wr_rate =  36; break;
3273 		case 0x9: tap->wr_rate =  48; break;
3274 		case 0xb: tap->wr_rate =  72; break;
3275 		case 0x1: tap->wr_rate =  96; break;
3276 		case 0x3: tap->wr_rate = 108; break;
3277 		/* Unknown rate: should not happen. */
3278 		default:  tap->wr_rate =   0;
3279 		}
3280 	}
3281 
3282 	IWM_UNLOCK(sc);
3283 	if (ni != NULL) {
3284 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3285 		ieee80211_input_mimo(ni, m);
3286 		ieee80211_free_node(ni);
3287 	} else {
3288 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3289 		ieee80211_input_mimo_all(ic, m);
3290 	}
3291 	IWM_LOCK(sc);
3292 
3293 	return TRUE;
3294 
3295 fail:
3296 	counter_u64_add(ic->ic_ierrors, 1);
3297 	return FALSE;
3298 }
3299 
3300 static int
3301 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3302 	struct iwm_node *in)
3303 {
3304 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3305 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3306 	struct ieee80211_node *ni = &in->in_ni;
3307 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3308 
3309 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3310 
3311 	/* Update rate control statistics. */
3312 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3313 	    __func__,
3314 	    (int) le16toh(tx_resp->status.status),
3315 	    (int) le16toh(tx_resp->status.sequence),
3316 	    tx_resp->frame_count,
3317 	    tx_resp->bt_kill_count,
3318 	    tx_resp->failure_rts,
3319 	    tx_resp->failure_frame,
3320 	    le32toh(tx_resp->initial_rate),
3321 	    (int) le16toh(tx_resp->wireless_media_time));
3322 
3323 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3324 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3325 	txs->short_retries = tx_resp->failure_rts;
3326 	txs->long_retries = tx_resp->failure_frame;
3327 	if (status != IWM_TX_STATUS_SUCCESS &&
3328 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3329 		switch (status) {
3330 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3331 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3332 			break;
3333 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3334 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3335 			break;
3336 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3337 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3338 			break;
3339 		default:
3340 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3341 			break;
3342 		}
3343 	} else {
3344 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3345 	}
3346 	ieee80211_ratectl_tx_complete(ni, txs);
3347 
3348 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3349 }
3350 
3351 static void
3352 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3353 {
3354 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3355 	int idx = cmd_hdr->idx;
3356 	int qid = cmd_hdr->qid;
3357 	struct iwm_tx_ring *ring = &sc->txq[qid];
3358 	struct iwm_tx_data *txd = &ring->data[idx];
3359 	struct iwm_node *in = txd->in;
3360 	struct mbuf *m = txd->m;
3361 	int status;
3362 
3363 	KASSERT(txd->done == 0, ("txd not done"));
3364 	KASSERT(txd->in != NULL, ("txd without node"));
3365 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3366 
3367 	sc->sc_tx_timer = 0;
3368 
3369 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3370 
3371 	/* Unmap and free mbuf. */
3372 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3373 	bus_dmamap_unload(ring->data_dmat, txd->map);
3374 
3375 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3376 	    "free txd %p, in %p\n", txd, txd->in);
3377 	txd->done = 1;
3378 	txd->m = NULL;
3379 	txd->in = NULL;
3380 
3381 	ieee80211_tx_complete(&in->in_ni, m, status);
3382 
3383 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3384 		sc->qfullmsk &= ~(1 << ring->qid);
3385 		if (sc->qfullmsk == 0) {
3386 			iwm_start(sc);
3387 		}
3388 	}
3389 }
3390 
3391 /*
3392  * transmit side
3393  */
3394 
3395 /*
3396  * Process a "command done" firmware notification.  This is where we wakeup
3397  * processes waiting for a synchronous command completion.
3398  * from if_iwn
3399  */
3400 static void
3401 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3402 {
3403 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3404 	struct iwm_tx_data *data;
3405 
3406 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3407 		return;	/* Not a command ack. */
3408 	}
3409 
3410 	/* XXX wide commands? */
3411 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3412 	    "cmd notification type 0x%x qid %d idx %d\n",
3413 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3414 
3415 	data = &ring->data[pkt->hdr.idx];
3416 
3417 	/* If the command was mapped in an mbuf, free it. */
3418 	if (data->m != NULL) {
3419 		bus_dmamap_sync(ring->data_dmat, data->map,
3420 		    BUS_DMASYNC_POSTWRITE);
3421 		bus_dmamap_unload(ring->data_dmat, data->map);
3422 		m_freem(data->m);
3423 		data->m = NULL;
3424 	}
3425 	wakeup(&ring->desc[pkt->hdr.idx]);
3426 
3427 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3428 		device_printf(sc->sc_dev,
3429 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3430 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3431 		/* XXX call iwm_force_nmi() */
3432 	}
3433 
3434 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3435 	ring->queued--;
3436 	if (ring->queued == 0)
3437 		iwm_pcie_clear_cmd_in_flight(sc);
3438 }
3439 
3440 #if 0
3441 /*
3442  * necessary only for block ack mode
3443  */
3444 void
3445 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3446 	uint16_t len)
3447 {
3448 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3449 	uint16_t w_val;
3450 
3451 	scd_bc_tbl = sc->sched_dma.vaddr;
3452 
3453 	len += 8; /* magic numbers came naturally from paris */
3454 	len = roundup(len, 4) / 4;
3455 
3456 	w_val = htole16(sta_id << 12 | len);
3457 
3458 	/* Update TX scheduler. */
3459 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3460 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3461 	    BUS_DMASYNC_PREWRITE);
3462 
3463 	/* I really wonder what this is ?!? */
3464 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3465 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3466 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3467 		    BUS_DMASYNC_PREWRITE);
3468 	}
3469 }
3470 #endif
3471 
3472 /*
3473  * Take an 802.11 (non-n) rate, find the relevant rate
3474  * table entry.  return the index into in_ridx[].
3475  *
3476  * The caller then uses that index back into in_ridx
3477  * to figure out the rate index programmed /into/
3478  * the firmware for this given node.
3479  */
3480 static int
3481 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3482     uint8_t rate)
3483 {
3484 	int i;
3485 	uint8_t r;
3486 
3487 	for (i = 0; i < nitems(in->in_ridx); i++) {
3488 		r = iwm_rates[in->in_ridx[i]].rate;
3489 		if (rate == r)
3490 			return (i);
3491 	}
3492 
3493 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3494 	    "%s: couldn't find an entry for rate=%d\n",
3495 	    __func__,
3496 	    rate);
3497 
3498 	/* XXX Return the first */
3499 	/* XXX TODO: have it return the /lowest/ */
3500 	return (0);
3501 }
3502 
3503 static int
3504 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3505 {
3506 	int i;
3507 
3508 	for (i = 0; i < nitems(iwm_rates); i++) {
3509 		if (iwm_rates[i].rate == rate)
3510 			return (i);
3511 	}
3512 	/* XXX error? */
3513 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3514 	    "%s: couldn't find an entry for rate=%d\n",
3515 	    __func__,
3516 	    rate);
3517 	return (0);
3518 }
3519 
3520 /*
3521  * Fill in the rate related information for a transmit command.
3522  */
3523 static const struct iwm_rate *
3524 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3525 	struct mbuf *m, struct iwm_tx_cmd *tx)
3526 {
3527 	struct ieee80211_node *ni = &in->in_ni;
3528 	struct ieee80211_frame *wh;
3529 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3530 	const struct iwm_rate *rinfo;
3531 	int type;
3532 	int ridx, rate_flags;
3533 
3534 	wh = mtod(m, struct ieee80211_frame *);
3535 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3536 
3537 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3538 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3539 
3540 	if (type == IEEE80211_FC0_TYPE_MGT ||
3541 	    type == IEEE80211_FC0_TYPE_CTL ||
3542 	    (m->m_flags & M_EAPOL) != 0) {
3543 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3544 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3545 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3546 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3547 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3548 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3549 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3550 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3551 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3552 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3553 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3554 	} else {
3555 		int i;
3556 
3557 		/* for data frames, use RS table */
3558 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3559 		/* XXX pass pktlen */
3560 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3561 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3562 		ridx = in->in_ridx[i];
3563 
3564 		/* This is the index into the programmed table */
3565 		tx->initial_rate_index = i;
3566 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3567 
3568 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3569 		    "%s: start with i=%d, txrate %d\n",
3570 		    __func__, i, iwm_rates[ridx].rate);
3571 	}
3572 
3573 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3574 	    "%s: frame type=%d txrate %d\n",
3575 	        __func__, type, iwm_rates[ridx].rate);
3576 
3577 	rinfo = &iwm_rates[ridx];
3578 
3579 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3580 	    __func__, ridx,
3581 	    rinfo->rate,
3582 	    !! (IWM_RIDX_IS_CCK(ridx))
3583 	    );
3584 
3585 	/* XXX TODO: hard-coded TX antenna? */
3586 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3587 	if (IWM_RIDX_IS_CCK(ridx))
3588 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3589 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3590 
3591 	return rinfo;
3592 }
3593 
3594 #define TB0_SIZE 16
3595 static int
3596 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3597 {
3598 	struct ieee80211com *ic = &sc->sc_ic;
3599 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3600 	struct iwm_node *in = IWM_NODE(ni);
3601 	struct iwm_tx_ring *ring;
3602 	struct iwm_tx_data *data;
3603 	struct iwm_tfd *desc;
3604 	struct iwm_device_cmd *cmd;
3605 	struct iwm_tx_cmd *tx;
3606 	struct ieee80211_frame *wh;
3607 	struct ieee80211_key *k = NULL;
3608 	struct mbuf *m1;
3609 	const struct iwm_rate *rinfo;
3610 	uint32_t flags;
3611 	u_int hdrlen;
3612 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3613 	int nsegs;
3614 	uint8_t tid, type;
3615 	int i, totlen, error, pad;
3616 
3617 	wh = mtod(m, struct ieee80211_frame *);
3618 	hdrlen = ieee80211_anyhdrsize(wh);
3619 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3620 	tid = 0;
3621 	ring = &sc->txq[ac];
3622 	desc = &ring->desc[ring->cur];
3623 	memset(desc, 0, sizeof(*desc));
3624 	data = &ring->data[ring->cur];
3625 
3626 	/* Fill out iwm_tx_cmd to send to the firmware */
3627 	cmd = &ring->cmd[ring->cur];
3628 	cmd->hdr.code = IWM_TX_CMD;
3629 	cmd->hdr.flags = 0;
3630 	cmd->hdr.qid = ring->qid;
3631 	cmd->hdr.idx = ring->cur;
3632 
3633 	tx = (void *)cmd->data;
3634 	memset(tx, 0, sizeof(*tx));
3635 
3636 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3637 
3638 	/* Encrypt the frame if need be. */
3639 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3640 		/* Retrieve key for TX && do software encryption. */
3641 		k = ieee80211_crypto_encap(ni, m);
3642 		if (k == NULL) {
3643 			m_freem(m);
3644 			return (ENOBUFS);
3645 		}
3646 		/* 802.11 header may have moved. */
3647 		wh = mtod(m, struct ieee80211_frame *);
3648 	}
3649 
3650 	if (ieee80211_radiotap_active_vap(vap)) {
3651 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3652 
3653 		tap->wt_flags = 0;
3654 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3655 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3656 		tap->wt_rate = rinfo->rate;
3657 		if (k != NULL)
3658 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3659 		ieee80211_radiotap_tx(vap, m);
3660 	}
3661 
3662 
3663 	totlen = m->m_pkthdr.len;
3664 
3665 	flags = 0;
3666 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3667 		flags |= IWM_TX_CMD_FLG_ACK;
3668 	}
3669 
3670 	if (type == IEEE80211_FC0_TYPE_DATA
3671 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3672 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3673 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3674 	}
3675 
3676 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3677 	    type != IEEE80211_FC0_TYPE_DATA)
3678 		tx->sta_id = sc->sc_aux_sta.sta_id;
3679 	else
3680 		tx->sta_id = IWM_STATION_ID;
3681 
3682 	if (type == IEEE80211_FC0_TYPE_MGT) {
3683 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3684 
3685 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3686 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3687 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3688 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3689 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3690 		} else {
3691 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3692 		}
3693 	} else {
3694 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3695 	}
3696 
3697 	if (hdrlen & 3) {
3698 		/* First segment length must be a multiple of 4. */
3699 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3700 		pad = 4 - (hdrlen & 3);
3701 	} else
3702 		pad = 0;
3703 
3704 	tx->driver_txop = 0;
3705 	tx->next_frame_len = 0;
3706 
3707 	tx->len = htole16(totlen);
3708 	tx->tid_tspec = tid;
3709 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3710 
3711 	/* Set physical address of "scratch area". */
3712 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3713 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3714 
3715 	/* Copy 802.11 header in TX command. */
3716 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3717 
3718 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3719 
3720 	tx->sec_ctl = 0;
3721 	tx->tx_flags |= htole32(flags);
3722 
3723 	/* Trim 802.11 header. */
3724 	m_adj(m, hdrlen);
3725 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3726 	    segs, &nsegs, BUS_DMA_NOWAIT);
3727 	if (error != 0) {
3728 		if (error != EFBIG) {
3729 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3730 			    error);
3731 			m_freem(m);
3732 			return error;
3733 		}
3734 		/* Too many DMA segments, linearize mbuf. */
3735 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3736 		if (m1 == NULL) {
3737 			device_printf(sc->sc_dev,
3738 			    "%s: could not defrag mbuf\n", __func__);
3739 			m_freem(m);
3740 			return (ENOBUFS);
3741 		}
3742 		m = m1;
3743 
3744 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3745 		    segs, &nsegs, BUS_DMA_NOWAIT);
3746 		if (error != 0) {
3747 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3748 			    error);
3749 			m_freem(m);
3750 			return error;
3751 		}
3752 	}
3753 	data->m = m;
3754 	data->in = in;
3755 	data->done = 0;
3756 
3757 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3758 	    "sending txd %p, in %p\n", data, data->in);
3759 	KASSERT(data->in != NULL, ("node is NULL"));
3760 
3761 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3762 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3763 	    ring->qid, ring->cur, totlen, nsegs,
3764 	    le32toh(tx->tx_flags),
3765 	    le32toh(tx->rate_n_flags),
3766 	    tx->initial_rate_index
3767 	    );
3768 
3769 	/* Fill TX descriptor. */
3770 	desc->num_tbs = 2 + nsegs;
3771 
3772 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3773 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3774 	    (TB0_SIZE << 4);
3775 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3776 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3777 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3778 	      + hdrlen + pad - TB0_SIZE) << 4);
3779 
3780 	/* Other DMA segments are for data payload. */
3781 	for (i = 0; i < nsegs; i++) {
3782 		seg = &segs[i];
3783 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3784 		desc->tbs[i+2].hi_n_len = \
3785 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3786 		    | ((seg->ds_len) << 4);
3787 	}
3788 
3789 	bus_dmamap_sync(ring->data_dmat, data->map,
3790 	    BUS_DMASYNC_PREWRITE);
3791 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3792 	    BUS_DMASYNC_PREWRITE);
3793 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3794 	    BUS_DMASYNC_PREWRITE);
3795 
3796 #if 0
3797 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3798 #endif
3799 
3800 	/* Kick TX ring. */
3801 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3802 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3803 
3804 	/* Mark TX ring as full if we reach a certain threshold. */
3805 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3806 		sc->qfullmsk |= 1 << ring->qid;
3807 	}
3808 
3809 	return 0;
3810 }
3811 
3812 static int
3813 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3814     const struct ieee80211_bpf_params *params)
3815 {
3816 	struct ieee80211com *ic = ni->ni_ic;
3817 	struct iwm_softc *sc = ic->ic_softc;
3818 	int error = 0;
3819 
3820 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3821 	    "->%s begin\n", __func__);
3822 
3823 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3824 		m_freem(m);
3825 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3826 		    "<-%s not RUNNING\n", __func__);
3827 		return (ENETDOWN);
3828         }
3829 
3830 	IWM_LOCK(sc);
3831 	/* XXX fix this */
3832         if (params == NULL) {
3833 		error = iwm_tx(sc, m, ni, 0);
3834 	} else {
3835 		error = iwm_tx(sc, m, ni, 0);
3836 	}
3837 	sc->sc_tx_timer = 5;
3838 	IWM_UNLOCK(sc);
3839 
3840         return (error);
3841 }
3842 
3843 /*
3844  * mvm/tx.c
3845  */
3846 
3847 /*
3848  * Note that there are transports that buffer frames before they reach
3849  * the firmware. This means that after flush_tx_path is called, the
3850  * queue might not be empty. The race-free way to handle this is to:
3851  * 1) set the station as draining
3852  * 2) flush the Tx path
3853  * 3) wait for the transport queues to be empty
3854  */
3855 int
3856 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3857 {
3858 	int ret;
3859 	struct iwm_tx_path_flush_cmd flush_cmd = {
3860 		.queues_ctl = htole32(tfd_msk),
3861 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3862 	};
3863 
3864 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3865 	    sizeof(flush_cmd), &flush_cmd);
3866 	if (ret)
3867                 device_printf(sc->sc_dev,
3868 		    "Flushing tx queue failed: %d\n", ret);
3869 	return ret;
3870 }
3871 
3872 /*
3873  * BEGIN mvm/quota.c
3874  */
3875 
3876 static int
3877 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3878 {
3879 	struct iwm_time_quota_cmd cmd;
3880 	int i, idx, ret, num_active_macs, quota, quota_rem;
3881 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3882 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3883 	uint16_t id;
3884 
3885 	memset(&cmd, 0, sizeof(cmd));
3886 
3887 	/* currently, PHY ID == binding ID */
3888 	if (ivp) {
3889 		id = ivp->phy_ctxt->id;
3890 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3891 		colors[id] = ivp->phy_ctxt->color;
3892 
3893 		if (1)
3894 			n_ifs[id] = 1;
3895 	}
3896 
3897 	/*
3898 	 * The FW's scheduling session consists of
3899 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3900 	 * equally between all the bindings that require quota
3901 	 */
3902 	num_active_macs = 0;
3903 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3904 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3905 		num_active_macs += n_ifs[i];
3906 	}
3907 
3908 	quota = 0;
3909 	quota_rem = 0;
3910 	if (num_active_macs) {
3911 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3912 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3913 	}
3914 
3915 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3916 		if (colors[i] < 0)
3917 			continue;
3918 
3919 		cmd.quotas[idx].id_and_color =
3920 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3921 
3922 		if (n_ifs[i] <= 0) {
3923 			cmd.quotas[idx].quota = htole32(0);
3924 			cmd.quotas[idx].max_duration = htole32(0);
3925 		} else {
3926 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3927 			cmd.quotas[idx].max_duration = htole32(0);
3928 		}
3929 		idx++;
3930 	}
3931 
3932 	/* Give the remainder of the session to the first binding */
3933 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3934 
3935 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3936 	    sizeof(cmd), &cmd);
3937 	if (ret)
3938 		device_printf(sc->sc_dev,
3939 		    "%s: Failed to send quota: %d\n", __func__, ret);
3940 	return ret;
3941 }
3942 
3943 /*
3944  * END mvm/quota.c
3945  */
3946 
3947 /*
3948  * ieee80211 routines
3949  */
3950 
3951 /*
3952  * Change to AUTH state in 80211 state machine.  Roughly matches what
3953  * Linux does in bss_info_changed().
3954  */
3955 static int
3956 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3957 {
3958 	struct ieee80211_node *ni;
3959 	struct iwm_node *in;
3960 	struct iwm_vap *iv = IWM_VAP(vap);
3961 	uint32_t duration;
3962 	int error;
3963 
3964 	/*
3965 	 * XXX i have a feeling that the vap node is being
3966 	 * freed from underneath us. Grr.
3967 	 */
3968 	ni = ieee80211_ref_node(vap->iv_bss);
3969 	in = IWM_NODE(ni);
3970 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3971 	    "%s: called; vap=%p, bss ni=%p\n",
3972 	    __func__,
3973 	    vap,
3974 	    ni);
3975 
3976 	in->in_assoc = 0;
3977 
3978 	/*
3979 	 * Firmware bug - it'll crash if the beacon interval is less
3980 	 * than 16. We can't avoid connecting at all, so refuse the
3981 	 * station state change, this will cause net80211 to abandon
3982 	 * attempts to connect to this AP, and eventually wpa_s will
3983 	 * blacklist the AP...
3984 	 */
3985 	if (ni->ni_intval < 16) {
3986 		device_printf(sc->sc_dev,
3987 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3988 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3989 		error = EINVAL;
3990 		goto out;
3991 	}
3992 
3993 	error = iwm_allow_mcast(vap, sc);
3994 	if (error) {
3995 		device_printf(sc->sc_dev,
3996 		    "%s: failed to set multicast\n", __func__);
3997 		goto out;
3998 	}
3999 
4000 	/*
4001 	 * This is where it deviates from what Linux does.
4002 	 *
4003 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4004 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4005 	 * and always does a mac_ctx_changed().
4006 	 *
4007 	 * The openbsd port doesn't attempt to do that - it reset things
4008 	 * at odd states and does the add here.
4009 	 *
4010 	 * So, until the state handling is fixed (ie, we never reset
4011 	 * the NIC except for a firmware failure, which should drag
4012 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4013 	 * contexts that are required), let's do a dirty hack here.
4014 	 */
4015 	if (iv->is_uploaded) {
4016 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4017 			device_printf(sc->sc_dev,
4018 			    "%s: failed to update MAC\n", __func__);
4019 			goto out;
4020 		}
4021 	} else {
4022 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4023 			device_printf(sc->sc_dev,
4024 			    "%s: failed to add MAC\n", __func__);
4025 			goto out;
4026 		}
4027 	}
4028 
4029 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4030 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4031 		device_printf(sc->sc_dev,
4032 		    "%s: failed update phy ctxt\n", __func__);
4033 		goto out;
4034 	}
4035 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4036 
4037 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4038 		device_printf(sc->sc_dev,
4039 		    "%s: binding update cmd\n", __func__);
4040 		goto out;
4041 	}
4042 	/*
4043 	 * Authentication becomes unreliable when powersaving is left enabled
4044 	 * here. Powersaving will be activated again when association has
4045 	 * finished or is aborted.
4046 	 */
4047 	iv->ps_disabled = TRUE;
4048 	error = iwm_mvm_power_update_mac(sc);
4049 	iv->ps_disabled = FALSE;
4050 	if (error != 0) {
4051 		device_printf(sc->sc_dev,
4052 		    "%s: failed to update power management\n",
4053 		    __func__);
4054 		goto out;
4055 	}
4056 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4057 		device_printf(sc->sc_dev,
4058 		    "%s: failed to add sta\n", __func__);
4059 		goto out;
4060 	}
4061 
4062 	/*
4063 	 * Prevent the FW from wandering off channel during association
4064 	 * by "protecting" the session with a time event.
4065 	 */
4066 	/* XXX duration is in units of TU, not MS */
4067 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4068 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4069 	DELAY(100);
4070 
4071 	error = 0;
4072 out:
4073 	ieee80211_free_node(ni);
4074 	return (error);
4075 }
4076 
4077 static int
4078 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4079 {
4080 	uint32_t tfd_msk;
4081 
4082 	/*
4083 	 * Ok, so *technically* the proper set of calls for going
4084 	 * from RUN back to SCAN is:
4085 	 *
4086 	 * iwm_mvm_power_mac_disable(sc, in);
4087 	 * iwm_mvm_mac_ctxt_changed(sc, vap);
4088 	 * iwm_mvm_rm_sta(sc, in);
4089 	 * iwm_mvm_update_quotas(sc, NULL);
4090 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4091 	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4092 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4093 	 *
4094 	 * However, that freezes the device not matter which permutations
4095 	 * and modifications are attempted.  Obviously, this driver is missing
4096 	 * something since it works in the Linux driver, but figuring out what
4097 	 * is missing is a little more complicated.  Now, since we're going
4098 	 * back to nothing anyway, we'll just do a complete device reset.
4099 	 * Up your's, device!
4100 	 */
4101 	/*
4102 	 * Just using 0xf for the queues mask is fine as long as we only
4103 	 * get here from RUN state.
4104 	 */
4105 	tfd_msk = 0xf;
4106 	mbufq_drain(&sc->sc_snd);
4107 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4108 	/*
4109 	 * We seem to get away with just synchronously sending the
4110 	 * IWM_TXPATH_FLUSH command.
4111 	 */
4112 //	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4113 	iwm_stop_device(sc);
4114 	iwm_init_hw(sc);
4115 	if (in)
4116 		in->in_assoc = 0;
4117 	return 0;
4118 
4119 #if 0
4120 	int error;
4121 
4122 	iwm_mvm_power_mac_disable(sc, in);
4123 
4124 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4125 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4126 		return error;
4127 	}
4128 
4129 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4130 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4131 		return error;
4132 	}
4133 	error = iwm_mvm_rm_sta(sc, in);
4134 	in->in_assoc = 0;
4135 	iwm_mvm_update_quotas(sc, NULL);
4136 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4137 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4138 		return error;
4139 	}
4140 	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4141 
4142 	iwm_mvm_mac_ctxt_remove(sc, in);
4143 
4144 	return error;
4145 #endif
4146 }
4147 
4148 static struct ieee80211_node *
4149 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4150 {
4151 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4152 	    M_NOWAIT | M_ZERO);
4153 }
4154 
4155 uint8_t
4156 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4157 {
4158 	int i;
4159 	uint8_t rval;
4160 
4161 	for (i = 0; i < rs->rs_nrates; i++) {
4162 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4163 		if (rval == iwm_rates[ridx].rate)
4164 			return rs->rs_rates[i];
4165 	}
4166 
4167 	return 0;
4168 }
4169 
4170 static void
4171 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4172 {
4173 	struct ieee80211_node *ni = &in->in_ni;
4174 	struct iwm_lq_cmd *lq = &in->in_lq;
4175 	int nrates = ni->ni_rates.rs_nrates;
4176 	int i, ridx, tab = 0;
4177 //	int txant = 0;
4178 
4179 	if (nrates > nitems(lq->rs_table)) {
4180 		device_printf(sc->sc_dev,
4181 		    "%s: node supports %d rates, driver handles "
4182 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4183 		return;
4184 	}
4185 	if (nrates == 0) {
4186 		device_printf(sc->sc_dev,
4187 		    "%s: node supports 0 rates, odd!\n", __func__);
4188 		return;
4189 	}
4190 
4191 	/*
4192 	 * XXX .. and most of iwm_node is not initialised explicitly;
4193 	 * it's all just 0x0 passed to the firmware.
4194 	 */
4195 
4196 	/* first figure out which rates we should support */
4197 	/* XXX TODO: this isn't 11n aware /at all/ */
4198 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4199 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4200 	    "%s: nrates=%d\n", __func__, nrates);
4201 
4202 	/*
4203 	 * Loop over nrates and populate in_ridx from the highest
4204 	 * rate to the lowest rate.  Remember, in_ridx[] has
4205 	 * IEEE80211_RATE_MAXSIZE entries!
4206 	 */
4207 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4208 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4209 
4210 		/* Map 802.11 rate to HW rate index. */
4211 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4212 			if (iwm_rates[ridx].rate == rate)
4213 				break;
4214 		if (ridx > IWM_RIDX_MAX) {
4215 			device_printf(sc->sc_dev,
4216 			    "%s: WARNING: device rate for %d not found!\n",
4217 			    __func__, rate);
4218 		} else {
4219 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4220 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4221 			    __func__,
4222 			    i,
4223 			    rate,
4224 			    ridx);
4225 			in->in_ridx[i] = ridx;
4226 		}
4227 	}
4228 
4229 	/* then construct a lq_cmd based on those */
4230 	memset(lq, 0, sizeof(*lq));
4231 	lq->sta_id = IWM_STATION_ID;
4232 
4233 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4234 	if (ni->ni_flags & IEEE80211_NODE_HT)
4235 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4236 
4237 	/*
4238 	 * are these used? (we don't do SISO or MIMO)
4239 	 * need to set them to non-zero, though, or we get an error.
4240 	 */
4241 	lq->single_stream_ant_msk = 1;
4242 	lq->dual_stream_ant_msk = 1;
4243 
4244 	/*
4245 	 * Build the actual rate selection table.
4246 	 * The lowest bits are the rates.  Additionally,
4247 	 * CCK needs bit 9 to be set.  The rest of the bits
4248 	 * we add to the table select the tx antenna
4249 	 * Note that we add the rates in the highest rate first
4250 	 * (opposite of ni_rates).
4251 	 */
4252 	/*
4253 	 * XXX TODO: this should be looping over the min of nrates
4254 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4255 	 */
4256 	for (i = 0; i < nrates; i++) {
4257 		int nextant;
4258 
4259 #if 0
4260 		if (txant == 0)
4261 			txant = iwm_mvm_get_valid_tx_ant(sc);
4262 		nextant = 1<<(ffs(txant)-1);
4263 		txant &= ~nextant;
4264 #else
4265 		nextant = iwm_mvm_get_valid_tx_ant(sc);
4266 #endif
4267 		/*
4268 		 * Map the rate id into a rate index into
4269 		 * our hardware table containing the
4270 		 * configuration to use for this rate.
4271 		 */
4272 		ridx = in->in_ridx[i];
4273 		tab = iwm_rates[ridx].plcp;
4274 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4275 		if (IWM_RIDX_IS_CCK(ridx))
4276 			tab |= IWM_RATE_MCS_CCK_MSK;
4277 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4278 		    "station rate i=%d, rate=%d, hw=%x\n",
4279 		    i, iwm_rates[ridx].rate, tab);
4280 		lq->rs_table[i] = htole32(tab);
4281 	}
4282 	/* then fill the rest with the lowest possible rate */
4283 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4284 		KASSERT(tab != 0, ("invalid tab"));
4285 		lq->rs_table[i] = htole32(tab);
4286 	}
4287 }
4288 
4289 static int
4290 iwm_media_change(struct ifnet *ifp)
4291 {
4292 	struct ieee80211vap *vap = ifp->if_softc;
4293 	struct ieee80211com *ic = vap->iv_ic;
4294 	struct iwm_softc *sc = ic->ic_softc;
4295 	int error;
4296 
4297 	error = ieee80211_media_change(ifp);
4298 	if (error != ENETRESET)
4299 		return error;
4300 
4301 	IWM_LOCK(sc);
4302 	if (ic->ic_nrunning > 0) {
4303 		iwm_stop(sc);
4304 		iwm_init(sc);
4305 	}
4306 	IWM_UNLOCK(sc);
4307 	return error;
4308 }
4309 
4310 
4311 static int
4312 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4313 {
4314 	struct iwm_vap *ivp = IWM_VAP(vap);
4315 	struct ieee80211com *ic = vap->iv_ic;
4316 	struct iwm_softc *sc = ic->ic_softc;
4317 	struct iwm_node *in;
4318 	int error;
4319 
4320 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4321 	    "switching state %s -> %s\n",
4322 	    ieee80211_state_name[vap->iv_state],
4323 	    ieee80211_state_name[nstate]);
4324 	IEEE80211_UNLOCK(ic);
4325 	IWM_LOCK(sc);
4326 
4327 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4328 		iwm_led_blink_stop(sc);
4329 
4330 	/* disable beacon filtering if we're hopping out of RUN */
4331 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4332 		iwm_mvm_disable_beacon_filter(sc);
4333 
4334 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4335 			in->in_assoc = 0;
4336 
4337 		if (nstate == IEEE80211_S_INIT) {
4338 			IWM_UNLOCK(sc);
4339 			IEEE80211_LOCK(ic);
4340 			error = ivp->iv_newstate(vap, nstate, arg);
4341 			IEEE80211_UNLOCK(ic);
4342 			IWM_LOCK(sc);
4343 			iwm_release(sc, NULL);
4344 			IWM_UNLOCK(sc);
4345 			IEEE80211_LOCK(ic);
4346 			return error;
4347 		}
4348 
4349 		/*
4350 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4351 		 * above then the card will be completely reinitialized,
4352 		 * so the driver must do everything necessary to bring the card
4353 		 * from INIT to SCAN.
4354 		 *
4355 		 * Additionally, upon receiving deauth frame from AP,
4356 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4357 		 * state. This will also fail with this driver, so bring the FSM
4358 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4359 		 *
4360 		 * XXX TODO: fix this for FreeBSD!
4361 		 */
4362 		if (nstate == IEEE80211_S_SCAN ||
4363 		    nstate == IEEE80211_S_AUTH ||
4364 		    nstate == IEEE80211_S_ASSOC) {
4365 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4366 			    "Force transition to INIT; MGT=%d\n", arg);
4367 			IWM_UNLOCK(sc);
4368 			IEEE80211_LOCK(ic);
4369 			/* Always pass arg as -1 since we can't Tx right now. */
4370 			/*
4371 			 * XXX arg is just ignored anyway when transitioning
4372 			 *     to IEEE80211_S_INIT.
4373 			 */
4374 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4375 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4376 			    "Going INIT->SCAN\n");
4377 			nstate = IEEE80211_S_SCAN;
4378 			IEEE80211_UNLOCK(ic);
4379 			IWM_LOCK(sc);
4380 		}
4381 	}
4382 
4383 	switch (nstate) {
4384 	case IEEE80211_S_INIT:
4385 	case IEEE80211_S_SCAN:
4386 		if (vap->iv_state == IEEE80211_S_AUTH ||
4387 		    vap->iv_state == IEEE80211_S_ASSOC) {
4388 			int myerr;
4389 			IWM_UNLOCK(sc);
4390 			IEEE80211_LOCK(ic);
4391 			myerr = ivp->iv_newstate(vap, nstate, arg);
4392 			IEEE80211_UNLOCK(ic);
4393 			IWM_LOCK(sc);
4394 			error = iwm_mvm_rm_sta(sc, vap, FALSE);
4395                         if (error) {
4396                                 device_printf(sc->sc_dev,
4397 				    "%s: Failed to remove station: %d\n",
4398 				    __func__, error);
4399 			}
4400 			error = iwm_mvm_mac_ctxt_changed(sc, vap);
4401                         if (error) {
4402                                 device_printf(sc->sc_dev,
4403                                     "%s: Failed to change mac context: %d\n",
4404                                     __func__, error);
4405                         }
4406                         error = iwm_mvm_binding_remove_vif(sc, ivp);
4407                         if (error) {
4408                                 device_printf(sc->sc_dev,
4409                                     "%s: Failed to remove channel ctx: %d\n",
4410                                     __func__, error);
4411                         }
4412 			ivp->phy_ctxt = NULL;
4413 			error = iwm_mvm_power_update_mac(sc);
4414 			if (error != 0) {
4415 				device_printf(sc->sc_dev,
4416 				    "%s: failed to update power management\n",
4417 				    __func__);
4418 			}
4419 			IWM_UNLOCK(sc);
4420 			IEEE80211_LOCK(ic);
4421 			return myerr;
4422 		}
4423 		break;
4424 
4425 	case IEEE80211_S_AUTH:
4426 		if ((error = iwm_auth(vap, sc)) != 0) {
4427 			device_printf(sc->sc_dev,
4428 			    "%s: could not move to auth state: %d\n",
4429 			    __func__, error);
4430 		}
4431 		break;
4432 
4433 	case IEEE80211_S_ASSOC:
4434 		/*
4435 		 * EBS may be disabled due to previous failures reported by FW.
4436 		 * Reset EBS status here assuming environment has been changed.
4437 		 */
4438 		sc->last_ebs_successful = TRUE;
4439 		break;
4440 
4441 	case IEEE80211_S_RUN:
4442 	{
4443 		struct iwm_host_cmd cmd = {
4444 			.id = IWM_LQ_CMD,
4445 			.len = { sizeof(in->in_lq), },
4446 			.flags = IWM_CMD_SYNC,
4447 		};
4448 
4449 		in = IWM_NODE(vap->iv_bss);
4450 		/* Update the association state, now we have it all */
4451 		/* (eg associd comes in at this point */
4452 		error = iwm_mvm_update_sta(sc, in);
4453 		if (error != 0) {
4454 			device_printf(sc->sc_dev,
4455 			    "%s: failed to update STA\n", __func__);
4456 			IWM_UNLOCK(sc);
4457 			IEEE80211_LOCK(ic);
4458 			return error;
4459 		}
4460 		in->in_assoc = 1;
4461 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4462 		if (error != 0) {
4463 			device_printf(sc->sc_dev,
4464 			    "%s: failed to update MAC: %d\n", __func__, error);
4465 		}
4466 
4467 		iwm_mvm_sf_update(sc, vap, FALSE);
4468 		iwm_mvm_enable_beacon_filter(sc, ivp);
4469 		iwm_mvm_power_update_mac(sc);
4470 		iwm_mvm_update_quotas(sc, ivp);
4471 		iwm_setrates(sc, in);
4472 
4473 		cmd.data[0] = &in->in_lq;
4474 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4475 			device_printf(sc->sc_dev,
4476 			    "%s: IWM_LQ_CMD failed\n", __func__);
4477 		}
4478 
4479 		iwm_mvm_led_enable(sc);
4480 		break;
4481 	}
4482 
4483 	default:
4484 		break;
4485 	}
4486 	IWM_UNLOCK(sc);
4487 	IEEE80211_LOCK(ic);
4488 
4489 	return (ivp->iv_newstate(vap, nstate, arg));
4490 }
4491 
4492 void
4493 iwm_endscan_cb(void *arg, int pending)
4494 {
4495 	struct iwm_softc *sc = arg;
4496 	struct ieee80211com *ic = &sc->sc_ic;
4497 
4498 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4499 	    "%s: scan ended\n",
4500 	    __func__);
4501 
4502 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4503 }
4504 
4505 static int
4506 iwm_send_bt_init_conf(struct iwm_softc *sc)
4507 {
4508 	struct iwm_bt_coex_cmd bt_cmd;
4509 
4510 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4511 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4512 
4513 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4514 	    &bt_cmd);
4515 }
4516 
4517 static boolean_t
4518 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4519 {
4520 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4521 	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4522 					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4523 
4524 	if (iwm_lar_disable)
4525 		return FALSE;
4526 
4527 	/*
4528 	 * Enable LAR only if it is supported by the FW (TLV) &&
4529 	 * enabled in the NVM
4530 	 */
4531 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4532 		return nvm_lar && tlv_lar;
4533 	else
4534 		return tlv_lar;
4535 }
4536 
4537 static boolean_t
4538 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4539 {
4540 	return fw_has_api(&sc->ucode_capa,
4541 			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4542 	       fw_has_capa(&sc->ucode_capa,
4543 			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4544 }
4545 
4546 static int
4547 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4548 {
4549 	struct iwm_mcc_update_cmd mcc_cmd;
4550 	struct iwm_host_cmd hcmd = {
4551 		.id = IWM_MCC_UPDATE_CMD,
4552 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4553 		.data = { &mcc_cmd },
4554 	};
4555 	int ret;
4556 #ifdef IWM_DEBUG
4557 	struct iwm_rx_packet *pkt;
4558 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4559 	struct iwm_mcc_update_resp *mcc_resp;
4560 	int n_channels;
4561 	uint16_t mcc;
4562 #endif
4563 	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4564 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4565 
4566 	if (!iwm_mvm_is_lar_supported(sc)) {
4567 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4568 		    __func__);
4569 		return 0;
4570 	}
4571 
4572 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4573 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4574 	if (iwm_mvm_is_wifi_mcc_supported(sc))
4575 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4576 	else
4577 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4578 
4579 	if (resp_v2)
4580 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4581 	else
4582 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4583 
4584 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4585 	    "send MCC update to FW with '%c%c' src = %d\n",
4586 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4587 
4588 	ret = iwm_send_cmd(sc, &hcmd);
4589 	if (ret)
4590 		return ret;
4591 
4592 #ifdef IWM_DEBUG
4593 	pkt = hcmd.resp_pkt;
4594 
4595 	/* Extract MCC response */
4596 	if (resp_v2) {
4597 		mcc_resp = (void *)pkt->data;
4598 		mcc = mcc_resp->mcc;
4599 		n_channels =  le32toh(mcc_resp->n_channels);
4600 	} else {
4601 		mcc_resp_v1 = (void *)pkt->data;
4602 		mcc = mcc_resp_v1->mcc;
4603 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4604 	}
4605 
4606 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4607 	if (mcc == 0)
4608 		mcc = 0x3030;  /* "00" - world */
4609 
4610 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4611 	    "regulatory domain '%c%c' (%d channels available)\n",
4612 	    mcc >> 8, mcc & 0xff, n_channels);
4613 #endif
4614 	iwm_free_resp(sc, &hcmd);
4615 
4616 	return 0;
4617 }
4618 
4619 static void
4620 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4621 {
4622 	struct iwm_host_cmd cmd = {
4623 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4624 		.len = { sizeof(uint32_t), },
4625 		.data = { &backoff, },
4626 	};
4627 
4628 	if (iwm_send_cmd(sc, &cmd) != 0) {
4629 		device_printf(sc->sc_dev,
4630 		    "failed to change thermal tx backoff\n");
4631 	}
4632 }
4633 
4634 static int
4635 iwm_init_hw(struct iwm_softc *sc)
4636 {
4637 	struct ieee80211com *ic = &sc->sc_ic;
4638 	int error, i, ac;
4639 
4640 	sc->sf_state = IWM_SF_UNINIT;
4641 
4642 	if ((error = iwm_start_hw(sc)) != 0) {
4643 		printf("iwm_start_hw: failed %d\n", error);
4644 		return error;
4645 	}
4646 
4647 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4648 		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4649 		return error;
4650 	}
4651 
4652 	/*
4653 	 * should stop and start HW since that INIT
4654 	 * image just loaded
4655 	 */
4656 	iwm_stop_device(sc);
4657 	sc->sc_ps_disabled = FALSE;
4658 	if ((error = iwm_start_hw(sc)) != 0) {
4659 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4660 		return error;
4661 	}
4662 
4663 	/* omstart, this time with the regular firmware */
4664 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4665 	if (error) {
4666 		device_printf(sc->sc_dev, "could not load firmware\n");
4667 		goto error;
4668 	}
4669 
4670 	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4671 	if (error)
4672 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4673 
4674 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4675 		device_printf(sc->sc_dev, "bt init conf failed\n");
4676 		goto error;
4677 	}
4678 
4679 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4680 	if (error != 0) {
4681 		device_printf(sc->sc_dev, "antenna config failed\n");
4682 		goto error;
4683 	}
4684 
4685 	/* Send phy db control command and then phy db calibration */
4686 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4687 		goto error;
4688 
4689 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4690 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4691 		goto error;
4692 	}
4693 
4694 	/* Add auxiliary station for scanning */
4695 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4696 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4697 		goto error;
4698 	}
4699 
4700 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4701 		/*
4702 		 * The channel used here isn't relevant as it's
4703 		 * going to be overwritten in the other flows.
4704 		 * For now use the first channel we have.
4705 		 */
4706 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4707 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4708 			goto error;
4709 	}
4710 
4711 	/* Initialize tx backoffs to the minimum. */
4712 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4713 		iwm_mvm_tt_tx_backoff(sc, 0);
4714 
4715 	error = iwm_mvm_power_update_device(sc);
4716 	if (error)
4717 		goto error;
4718 
4719 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4720 		goto error;
4721 
4722 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4723 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4724 			goto error;
4725 	}
4726 
4727 	/* Enable Tx queues. */
4728 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4729 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4730 		    iwm_mvm_ac_to_tx_fifo[ac]);
4731 		if (error)
4732 			goto error;
4733 	}
4734 
4735 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4736 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4737 		goto error;
4738 	}
4739 
4740 	return 0;
4741 
4742  error:
4743 	iwm_stop_device(sc);
4744 	return error;
4745 }
4746 
4747 /* Allow multicast from our BSSID. */
4748 static int
4749 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4750 {
4751 	struct ieee80211_node *ni = vap->iv_bss;
4752 	struct iwm_mcast_filter_cmd *cmd;
4753 	size_t size;
4754 	int error;
4755 
4756 	size = roundup(sizeof(*cmd), 4);
4757 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4758 	if (cmd == NULL)
4759 		return ENOMEM;
4760 	cmd->filter_own = 1;
4761 	cmd->port_id = 0;
4762 	cmd->count = 0;
4763 	cmd->pass_all = 1;
4764 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4765 
4766 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4767 	    IWM_CMD_SYNC, size, cmd);
4768 	free(cmd, M_DEVBUF);
4769 
4770 	return (error);
4771 }
4772 
4773 /*
4774  * ifnet interfaces
4775  */
4776 
4777 static void
4778 iwm_init(struct iwm_softc *sc)
4779 {
4780 	int error;
4781 
4782 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4783 		return;
4784 	}
4785 	sc->sc_generation++;
4786 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4787 
4788 	if ((error = iwm_init_hw(sc)) != 0) {
4789 		printf("iwm_init_hw failed %d\n", error);
4790 		iwm_stop(sc);
4791 		return;
4792 	}
4793 
4794 	/*
4795 	 * Ok, firmware loaded and we are jogging
4796 	 */
4797 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4798 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4799 }
4800 
4801 static int
4802 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4803 {
4804 	struct iwm_softc *sc;
4805 	int error;
4806 
4807 	sc = ic->ic_softc;
4808 
4809 	IWM_LOCK(sc);
4810 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4811 		IWM_UNLOCK(sc);
4812 		return (ENXIO);
4813 	}
4814 	error = mbufq_enqueue(&sc->sc_snd, m);
4815 	if (error) {
4816 		IWM_UNLOCK(sc);
4817 		return (error);
4818 	}
4819 	iwm_start(sc);
4820 	IWM_UNLOCK(sc);
4821 	return (0);
4822 }
4823 
4824 /*
4825  * Dequeue packets from sendq and call send.
4826  */
4827 static void
4828 iwm_start(struct iwm_softc *sc)
4829 {
4830 	struct ieee80211_node *ni;
4831 	struct mbuf *m;
4832 	int ac = 0;
4833 
4834 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4835 	while (sc->qfullmsk == 0 &&
4836 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4837 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4838 		if (iwm_tx(sc, m, ni, ac) != 0) {
4839 			if_inc_counter(ni->ni_vap->iv_ifp,
4840 			    IFCOUNTER_OERRORS, 1);
4841 			ieee80211_free_node(ni);
4842 			continue;
4843 		}
4844 		sc->sc_tx_timer = 15;
4845 	}
4846 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4847 }
4848 
4849 static void
4850 iwm_stop(struct iwm_softc *sc)
4851 {
4852 
4853 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4854 	sc->sc_flags |= IWM_FLAG_STOPPED;
4855 	sc->sc_generation++;
4856 	iwm_led_blink_stop(sc);
4857 	sc->sc_tx_timer = 0;
4858 	iwm_stop_device(sc);
4859 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4860 }
4861 
4862 static void
4863 iwm_watchdog(void *arg)
4864 {
4865 	struct iwm_softc *sc = arg;
4866 	struct ieee80211com *ic = &sc->sc_ic;
4867 
4868 	if (sc->sc_tx_timer > 0) {
4869 		if (--sc->sc_tx_timer == 0) {
4870 			device_printf(sc->sc_dev, "device timeout\n");
4871 #ifdef IWM_DEBUG
4872 			iwm_nic_error(sc);
4873 #endif
4874 			ieee80211_restart_all(ic);
4875 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4876 			return;
4877 		}
4878 	}
4879 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4880 }
4881 
4882 static void
4883 iwm_parent(struct ieee80211com *ic)
4884 {
4885 	struct iwm_softc *sc = ic->ic_softc;
4886 	int startall = 0;
4887 
4888 	IWM_LOCK(sc);
4889 	if (ic->ic_nrunning > 0) {
4890 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4891 			iwm_init(sc);
4892 			startall = 1;
4893 		}
4894 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4895 		iwm_stop(sc);
4896 	IWM_UNLOCK(sc);
4897 	if (startall)
4898 		ieee80211_start_all(ic);
4899 }
4900 
4901 /*
4902  * The interrupt side of things
4903  */
4904 
4905 /*
4906  * error dumping routines are from iwlwifi/mvm/utils.c
4907  */
4908 
4909 /*
4910  * Note: This structure is read from the device with IO accesses,
4911  * and the reading already does the endian conversion. As it is
4912  * read with uint32_t-sized accesses, any members with a different size
4913  * need to be ordered correctly though!
4914  */
4915 struct iwm_error_event_table {
4916 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4917 	uint32_t error_id;		/* type of error */
4918 	uint32_t trm_hw_status0;	/* TRM HW status */
4919 	uint32_t trm_hw_status1;	/* TRM HW status */
4920 	uint32_t blink2;		/* branch link */
4921 	uint32_t ilink1;		/* interrupt link */
4922 	uint32_t ilink2;		/* interrupt link */
4923 	uint32_t data1;		/* error-specific data */
4924 	uint32_t data2;		/* error-specific data */
4925 	uint32_t data3;		/* error-specific data */
4926 	uint32_t bcon_time;		/* beacon timer */
4927 	uint32_t tsf_low;		/* network timestamp function timer */
4928 	uint32_t tsf_hi;		/* network timestamp function timer */
4929 	uint32_t gp1;		/* GP1 timer register */
4930 	uint32_t gp2;		/* GP2 timer register */
4931 	uint32_t fw_rev_type;	/* firmware revision type */
4932 	uint32_t major;		/* uCode version major */
4933 	uint32_t minor;		/* uCode version minor */
4934 	uint32_t hw_ver;		/* HW Silicon version */
4935 	uint32_t brd_ver;		/* HW board version */
4936 	uint32_t log_pc;		/* log program counter */
4937 	uint32_t frame_ptr;		/* frame pointer */
4938 	uint32_t stack_ptr;		/* stack pointer */
4939 	uint32_t hcmd;		/* last host command header */
4940 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4941 				 * rxtx_flag */
4942 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4943 				 * host_flag */
4944 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4945 				 * enc_flag */
4946 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4947 				 * time_flag */
4948 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4949 				 * wico interrupt */
4950 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4951 	uint32_t wait_event;		/* wait event() caller address */
4952 	uint32_t l2p_control;	/* L2pControlField */
4953 	uint32_t l2p_duration;	/* L2pDurationField */
4954 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4955 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4956 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4957 				 * (LMPM_PMG_SEL) */
4958 	uint32_t u_timestamp;	/* indicate when the date and time of the
4959 				 * compilation */
4960 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4961 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4962 
4963 /*
4964  * UMAC error struct - relevant starting from family 8000 chip.
4965  * Note: This structure is read from the device with IO accesses,
4966  * and the reading already does the endian conversion. As it is
4967  * read with u32-sized accesses, any members with a different size
4968  * need to be ordered correctly though!
4969  */
4970 struct iwm_umac_error_event_table {
4971 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4972 	uint32_t error_id;	/* type of error */
4973 	uint32_t blink1;	/* branch link */
4974 	uint32_t blink2;	/* branch link */
4975 	uint32_t ilink1;	/* interrupt link */
4976 	uint32_t ilink2;	/* interrupt link */
4977 	uint32_t data1;		/* error-specific data */
4978 	uint32_t data2;		/* error-specific data */
4979 	uint32_t data3;		/* error-specific data */
4980 	uint32_t umac_major;
4981 	uint32_t umac_minor;
4982 	uint32_t frame_pointer;	/* core register 27*/
4983 	uint32_t stack_pointer;	/* core register 28 */
4984 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4985 	uint32_t nic_isr_pref;	/* ISR status register */
4986 } __packed;
4987 
4988 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4989 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4990 
4991 #ifdef IWM_DEBUG
4992 struct {
4993 	const char *name;
4994 	uint8_t num;
4995 } advanced_lookup[] = {
4996 	{ "NMI_INTERRUPT_WDG", 0x34 },
4997 	{ "SYSASSERT", 0x35 },
4998 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4999 	{ "BAD_COMMAND", 0x38 },
5000 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5001 	{ "FATAL_ERROR", 0x3D },
5002 	{ "NMI_TRM_HW_ERR", 0x46 },
5003 	{ "NMI_INTERRUPT_TRM", 0x4C },
5004 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5005 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5006 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5007 	{ "NMI_INTERRUPT_HOST", 0x66 },
5008 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5009 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5010 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5011 	{ "ADVANCED_SYSASSERT", 0 },
5012 };
5013 
5014 static const char *
5015 iwm_desc_lookup(uint32_t num)
5016 {
5017 	int i;
5018 
5019 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5020 		if (advanced_lookup[i].num == num)
5021 			return advanced_lookup[i].name;
5022 
5023 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5024 	return advanced_lookup[i].name;
5025 }
5026 
5027 static void
5028 iwm_nic_umac_error(struct iwm_softc *sc)
5029 {
5030 	struct iwm_umac_error_event_table table;
5031 	uint32_t base;
5032 
5033 	base = sc->umac_error_event_table;
5034 
5035 	if (base < 0x800000) {
5036 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5037 		    base);
5038 		return;
5039 	}
5040 
5041 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5042 		device_printf(sc->sc_dev, "reading errlog failed\n");
5043 		return;
5044 	}
5045 
5046 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5047 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5048 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5049 		    sc->sc_flags, table.valid);
5050 	}
5051 
5052 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5053 		iwm_desc_lookup(table.error_id));
5054 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5055 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5056 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5057 	    table.ilink1);
5058 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5059 	    table.ilink2);
5060 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5061 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5062 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5063 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5064 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5065 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5066 	    table.frame_pointer);
5067 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5068 	    table.stack_pointer);
5069 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5070 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5071 	    table.nic_isr_pref);
5072 }
5073 
5074 /*
5075  * Support for dumping the error log seemed like a good idea ...
5076  * but it's mostly hex junk and the only sensible thing is the
5077  * hw/ucode revision (which we know anyway).  Since it's here,
5078  * I'll just leave it in, just in case e.g. the Intel guys want to
5079  * help us decipher some "ADVANCED_SYSASSERT" later.
5080  */
5081 static void
5082 iwm_nic_error(struct iwm_softc *sc)
5083 {
5084 	struct iwm_error_event_table table;
5085 	uint32_t base;
5086 
5087 	device_printf(sc->sc_dev, "dumping device error log\n");
5088 	base = sc->error_event_table;
5089 	if (base < 0x800000) {
5090 		device_printf(sc->sc_dev,
5091 		    "Invalid error log pointer 0x%08x\n", base);
5092 		return;
5093 	}
5094 
5095 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5096 		device_printf(sc->sc_dev, "reading errlog failed\n");
5097 		return;
5098 	}
5099 
5100 	if (!table.valid) {
5101 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5102 		return;
5103 	}
5104 
5105 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5106 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5107 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5108 		    sc->sc_flags, table.valid);
5109 	}
5110 
5111 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5112 	    iwm_desc_lookup(table.error_id));
5113 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5114 	    table.trm_hw_status0);
5115 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5116 	    table.trm_hw_status1);
5117 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5118 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5119 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5120 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5121 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5122 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5123 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5124 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5125 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5126 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5127 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5128 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5129 	    table.fw_rev_type);
5130 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5131 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5132 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5133 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5134 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5135 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5136 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5137 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5138 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5139 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5140 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5141 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5142 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5143 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5144 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5145 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5146 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5147 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5148 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5149 
5150 	if (sc->umac_error_event_table)
5151 		iwm_nic_umac_error(sc);
5152 }
5153 #endif
5154 
5155 static void
5156 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5157 {
5158 	struct ieee80211com *ic = &sc->sc_ic;
5159 	struct iwm_cmd_response *cresp;
5160 	struct mbuf *m1;
5161 	uint32_t offset = 0;
5162 	uint32_t maxoff = IWM_RBUF_SIZE;
5163 	uint32_t nextoff;
5164 	boolean_t stolen = FALSE;
5165 
5166 #define HAVEROOM(a)	\
5167     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5168 
5169 	while (HAVEROOM(offset)) {
5170 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5171 		    offset);
5172 		int qid, idx, code, len;
5173 
5174 		qid = pkt->hdr.qid;
5175 		idx = pkt->hdr.idx;
5176 
5177 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5178 
5179 		/*
5180 		 * randomly get these from the firmware, no idea why.
5181 		 * they at least seem harmless, so just ignore them for now
5182 		 */
5183 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5184 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5185 			break;
5186 		}
5187 
5188 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5189 		    "rx packet qid=%d idx=%d type=%x\n",
5190 		    qid & ~0x80, pkt->hdr.idx, code);
5191 
5192 		len = iwm_rx_packet_len(pkt);
5193 		len += sizeof(uint32_t); /* account for status word */
5194 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5195 
5196 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5197 
5198 		switch (code) {
5199 		case IWM_REPLY_RX_PHY_CMD:
5200 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5201 			break;
5202 
5203 		case IWM_REPLY_RX_MPDU_CMD: {
5204 			/*
5205 			 * If this is the last frame in the RX buffer, we
5206 			 * can directly feed the mbuf to the sharks here.
5207 			 */
5208 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5209 			    struct iwm_rx_packet *, nextoff);
5210 			if (!HAVEROOM(nextoff) ||
5211 			    (nextpkt->hdr.code == 0 &&
5212 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5213 			     nextpkt->hdr.idx == 0) ||
5214 			    (nextpkt->len_n_flags ==
5215 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5216 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5217 					stolen = FALSE;
5218 					/* Make sure we abort the loop */
5219 					nextoff = maxoff;
5220 				}
5221 				break;
5222 			}
5223 
5224 			/*
5225 			 * Use m_copym instead of m_split, because that
5226 			 * makes it easier to keep a valid rx buffer in
5227 			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5228 			 *
5229 			 * We need to start m_copym() at offset 0, to get the
5230 			 * M_PKTHDR flag preserved.
5231 			 */
5232 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5233 			if (m1) {
5234 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5235 					stolen = TRUE;
5236 				else
5237 					m_freem(m1);
5238 			}
5239 			break;
5240 		}
5241 
5242 		case IWM_TX_CMD:
5243 			iwm_mvm_rx_tx_cmd(sc, pkt);
5244 			break;
5245 
5246 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5247 			struct iwm_missed_beacons_notif *resp;
5248 			int missed;
5249 
5250 			/* XXX look at mac_id to determine interface ID */
5251 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5252 
5253 			resp = (void *)pkt->data;
5254 			missed = le32toh(resp->consec_missed_beacons);
5255 
5256 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5257 			    "%s: MISSED_BEACON: mac_id=%d, "
5258 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5259 			    "num_rx=%d\n",
5260 			    __func__,
5261 			    le32toh(resp->mac_id),
5262 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5263 			    le32toh(resp->consec_missed_beacons),
5264 			    le32toh(resp->num_expected_beacons),
5265 			    le32toh(resp->num_recvd_beacons));
5266 
5267 			/* Be paranoid */
5268 			if (vap == NULL)
5269 				break;
5270 
5271 			/* XXX no net80211 locking? */
5272 			if (vap->iv_state == IEEE80211_S_RUN &&
5273 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5274 				if (missed > vap->iv_bmissthreshold) {
5275 					/* XXX bad locking; turn into task */
5276 					IWM_UNLOCK(sc);
5277 					ieee80211_beacon_miss(ic);
5278 					IWM_LOCK(sc);
5279 				}
5280 			}
5281 
5282 			break;
5283 		}
5284 
5285 		case IWM_MFUART_LOAD_NOTIFICATION:
5286 			break;
5287 
5288 		case IWM_MVM_ALIVE:
5289 			break;
5290 
5291 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5292 			break;
5293 
5294 		case IWM_STATISTICS_NOTIFICATION: {
5295 			struct iwm_notif_statistics *stats;
5296 			stats = (void *)pkt->data;
5297 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5298 			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5299 			break;
5300 		}
5301 
5302 		case IWM_NVM_ACCESS_CMD:
5303 		case IWM_MCC_UPDATE_CMD:
5304 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5305 				memcpy(sc->sc_cmd_resp,
5306 				    pkt, sizeof(sc->sc_cmd_resp));
5307 			}
5308 			break;
5309 
5310 		case IWM_MCC_CHUB_UPDATE_CMD: {
5311 			struct iwm_mcc_chub_notif *notif;
5312 			notif = (void *)pkt->data;
5313 
5314 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5315 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5316 			sc->sc_fw_mcc[2] = '\0';
5317 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5318 			    "fw source %d sent CC '%s'\n",
5319 			    notif->source_id, sc->sc_fw_mcc);
5320 			break;
5321 		}
5322 
5323 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5324 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5325 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5326 			struct iwm_dts_measurement_notif_v1 *notif;
5327 
5328 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5329 				device_printf(sc->sc_dev,
5330 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5331 				break;
5332 			}
5333 			notif = (void *)pkt->data;
5334 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5335 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5336 			    notif->temp);
5337 			break;
5338 		}
5339 
5340 		case IWM_PHY_CONFIGURATION_CMD:
5341 		case IWM_TX_ANT_CONFIGURATION_CMD:
5342 		case IWM_ADD_STA:
5343 		case IWM_MAC_CONTEXT_CMD:
5344 		case IWM_REPLY_SF_CFG_CMD:
5345 		case IWM_POWER_TABLE_CMD:
5346 		case IWM_PHY_CONTEXT_CMD:
5347 		case IWM_BINDING_CONTEXT_CMD:
5348 		case IWM_TIME_EVENT_CMD:
5349 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5350 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5351 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5352 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5353 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5354 		case IWM_REPLY_BEACON_FILTERING_CMD:
5355 		case IWM_MAC_PM_POWER_TABLE:
5356 		case IWM_TIME_QUOTA_CMD:
5357 		case IWM_REMOVE_STA:
5358 		case IWM_TXPATH_FLUSH:
5359 		case IWM_LQ_CMD:
5360 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5361 				 IWM_FW_PAGING_BLOCK_CMD):
5362 		case IWM_BT_CONFIG:
5363 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5364 			cresp = (void *)pkt->data;
5365 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5366 				memcpy(sc->sc_cmd_resp,
5367 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5368 			}
5369 			break;
5370 
5371 		/* ignore */
5372 		case IWM_PHY_DB_CMD:
5373 			break;
5374 
5375 		case IWM_INIT_COMPLETE_NOTIF:
5376 			break;
5377 
5378 		case IWM_SCAN_OFFLOAD_COMPLETE:
5379 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5380 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5381 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5382 				ieee80211_runtask(ic, &sc->sc_es_task);
5383 			}
5384 			break;
5385 
5386 		case IWM_SCAN_ITERATION_COMPLETE: {
5387 			struct iwm_lmac_scan_complete_notif *notif;
5388 			notif = (void *)pkt->data;
5389 			break;
5390 		}
5391 
5392 		case IWM_SCAN_COMPLETE_UMAC:
5393 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5394 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5395 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5396 				ieee80211_runtask(ic, &sc->sc_es_task);
5397 			}
5398 			break;
5399 
5400 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5401 			struct iwm_umac_scan_iter_complete_notif *notif;
5402 			notif = (void *)pkt->data;
5403 
5404 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5405 			    "complete, status=0x%x, %d channels scanned\n",
5406 			    notif->status, notif->scanned_channels);
5407 			break;
5408 		}
5409 
5410 		case IWM_REPLY_ERROR: {
5411 			struct iwm_error_resp *resp;
5412 			resp = (void *)pkt->data;
5413 
5414 			device_printf(sc->sc_dev,
5415 			    "firmware error 0x%x, cmd 0x%x\n",
5416 			    le32toh(resp->error_type),
5417 			    resp->cmd_id);
5418 			break;
5419 		}
5420 
5421 		case IWM_TIME_EVENT_NOTIFICATION: {
5422 			struct iwm_time_event_notif *notif;
5423 			notif = (void *)pkt->data;
5424 
5425 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5426 			    "TE notif status = 0x%x action = 0x%x\n",
5427 			    notif->status, notif->action);
5428 			break;
5429 		}
5430 
5431 		/*
5432 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5433 		 * messages. Just ignore them for now.
5434 		 */
5435 		case IWM_DEBUG_LOG_MSG:
5436 			break;
5437 
5438 		case IWM_MCAST_FILTER_CMD:
5439 			break;
5440 
5441 		case IWM_SCD_QUEUE_CFG: {
5442 			struct iwm_scd_txq_cfg_rsp *rsp;
5443 			rsp = (void *)pkt->data;
5444 
5445 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5446 			    "queue cfg token=0x%x sta_id=%d "
5447 			    "tid=%d scd_queue=%d\n",
5448 			    rsp->token, rsp->sta_id, rsp->tid,
5449 			    rsp->scd_queue);
5450 			break;
5451 		}
5452 
5453 		default:
5454 			device_printf(sc->sc_dev,
5455 			    "frame %d/%d %x UNHANDLED (this should "
5456 			    "not happen)\n", qid & ~0x80, idx,
5457 			    pkt->len_n_flags);
5458 			break;
5459 		}
5460 
5461 		/*
5462 		 * Why test bit 0x80?  The Linux driver:
5463 		 *
5464 		 * There is one exception:  uCode sets bit 15 when it
5465 		 * originates the response/notification, i.e. when the
5466 		 * response/notification is not a direct response to a
5467 		 * command sent by the driver.  For example, uCode issues
5468 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5469 		 * it is not a direct response to any driver command.
5470 		 *
5471 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5472 		 * uses a slightly different format for pkt->hdr, and "qid"
5473 		 * is actually the upper byte of a two-byte field.
5474 		 */
5475 		if (!(qid & (1 << 7)))
5476 			iwm_cmd_done(sc, pkt);
5477 
5478 		offset = nextoff;
5479 	}
5480 	if (stolen)
5481 		m_freem(m);
5482 #undef HAVEROOM
5483 }
5484 
5485 /*
5486  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5487  * Basic structure from if_iwn
5488  */
5489 static void
5490 iwm_notif_intr(struct iwm_softc *sc)
5491 {
5492 	uint16_t hw;
5493 
5494 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5495 	    BUS_DMASYNC_POSTREAD);
5496 
5497 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5498 
5499 	/*
5500 	 * Process responses
5501 	 */
5502 	while (sc->rxq.cur != hw) {
5503 		struct iwm_rx_ring *ring = &sc->rxq;
5504 		struct iwm_rx_data *data = &ring->data[ring->cur];
5505 
5506 		bus_dmamap_sync(ring->data_dmat, data->map,
5507 		    BUS_DMASYNC_POSTREAD);
5508 
5509 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5510 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5511 		iwm_handle_rxb(sc, data->m);
5512 
5513 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5514 	}
5515 
5516 	/*
5517 	 * Tell the firmware that it can reuse the ring entries that
5518 	 * we have just processed.
5519 	 * Seems like the hardware gets upset unless we align
5520 	 * the write by 8??
5521 	 */
5522 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5523 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5524 }
5525 
5526 static void
5527 iwm_intr(void *arg)
5528 {
5529 	struct iwm_softc *sc = arg;
5530 	int handled = 0;
5531 	int r1, r2, rv = 0;
5532 	int isperiodic = 0;
5533 
5534 	IWM_LOCK(sc);
5535 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5536 
5537 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5538 		uint32_t *ict = sc->ict_dma.vaddr;
5539 		int tmp;
5540 
5541 		tmp = htole32(ict[sc->ict_cur]);
5542 		if (!tmp)
5543 			goto out_ena;
5544 
5545 		/*
5546 		 * ok, there was something.  keep plowing until we have all.
5547 		 */
5548 		r1 = r2 = 0;
5549 		while (tmp) {
5550 			r1 |= tmp;
5551 			ict[sc->ict_cur] = 0;
5552 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5553 			tmp = htole32(ict[sc->ict_cur]);
5554 		}
5555 
5556 		/* this is where the fun begins.  don't ask */
5557 		if (r1 == 0xffffffff)
5558 			r1 = 0;
5559 
5560 		/* i am not expected to understand this */
5561 		if (r1 & 0xc0000)
5562 			r1 |= 0x8000;
5563 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5564 	} else {
5565 		r1 = IWM_READ(sc, IWM_CSR_INT);
5566 		/* "hardware gone" (where, fishing?) */
5567 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5568 			goto out;
5569 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5570 	}
5571 	if (r1 == 0 && r2 == 0) {
5572 		goto out_ena;
5573 	}
5574 
5575 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5576 
5577 	/* Safely ignore these bits for debug checks below */
5578 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5579 
5580 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5581 		int i;
5582 		struct ieee80211com *ic = &sc->sc_ic;
5583 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5584 
5585 #ifdef IWM_DEBUG
5586 		iwm_nic_error(sc);
5587 #endif
5588 		/* Dump driver status (TX and RX rings) while we're here. */
5589 		device_printf(sc->sc_dev, "driver status:\n");
5590 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5591 			struct iwm_tx_ring *ring = &sc->txq[i];
5592 			device_printf(sc->sc_dev,
5593 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5594 			    "queued=%-3d\n",
5595 			    i, ring->qid, ring->cur, ring->queued);
5596 		}
5597 		device_printf(sc->sc_dev,
5598 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5599 		device_printf(sc->sc_dev,
5600 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5601 
5602 		/* Don't stop the device; just do a VAP restart */
5603 		IWM_UNLOCK(sc);
5604 
5605 		if (vap == NULL) {
5606 			printf("%s: null vap\n", __func__);
5607 			return;
5608 		}
5609 
5610 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5611 		    "restarting\n", __func__, vap->iv_state);
5612 
5613 		ieee80211_restart_all(ic);
5614 		return;
5615 	}
5616 
5617 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5618 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5619 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5620 		iwm_stop(sc);
5621 		rv = 1;
5622 		goto out;
5623 	}
5624 
5625 	/* firmware chunk loaded */
5626 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5627 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5628 		handled |= IWM_CSR_INT_BIT_FH_TX;
5629 		sc->sc_fw_chunk_done = 1;
5630 		wakeup(&sc->sc_fw);
5631 	}
5632 
5633 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5634 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5635 		if (iwm_check_rfkill(sc)) {
5636 			device_printf(sc->sc_dev,
5637 			    "%s: rfkill switch, disabling interface\n",
5638 			    __func__);
5639 			iwm_stop(sc);
5640 		}
5641 	}
5642 
5643 	/*
5644 	 * The Linux driver uses periodic interrupts to avoid races.
5645 	 * We cargo-cult like it's going out of fashion.
5646 	 */
5647 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5648 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5649 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5650 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5651 			IWM_WRITE_1(sc,
5652 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5653 		isperiodic = 1;
5654 	}
5655 
5656 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5657 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5658 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5659 
5660 		iwm_notif_intr(sc);
5661 
5662 		/* enable periodic interrupt, see above */
5663 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5664 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5665 			    IWM_CSR_INT_PERIODIC_ENA);
5666 	}
5667 
5668 	if (__predict_false(r1 & ~handled))
5669 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5670 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5671 	rv = 1;
5672 
5673  out_ena:
5674 	iwm_restore_interrupts(sc);
5675  out:
5676 	IWM_UNLOCK(sc);
5677 	return;
5678 }
5679 
5680 /*
5681  * Autoconf glue-sniffing
5682  */
5683 #define	PCI_VENDOR_INTEL		0x8086
5684 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5685 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5686 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5687 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5688 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5689 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5690 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5691 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5692 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5693 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5694 
5695 static const struct iwm_devices {
5696 	uint16_t		device;
5697 	const struct iwm_cfg	*cfg;
5698 } iwm_devices[] = {
5699 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5700 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5701 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5702 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5703 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5704 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5705 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5706 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5707 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5708 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5709 };
5710 
5711 static int
5712 iwm_probe(device_t dev)
5713 {
5714 	int i;
5715 
5716 	for (i = 0; i < nitems(iwm_devices); i++) {
5717 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5718 		    pci_get_device(dev) == iwm_devices[i].device) {
5719 			device_set_desc(dev, iwm_devices[i].cfg->name);
5720 			return (BUS_PROBE_DEFAULT);
5721 		}
5722 	}
5723 
5724 	return (ENXIO);
5725 }
5726 
5727 static int
5728 iwm_dev_check(device_t dev)
5729 {
5730 	struct iwm_softc *sc;
5731 	uint16_t devid;
5732 	int i;
5733 
5734 	sc = device_get_softc(dev);
5735 
5736 	devid = pci_get_device(dev);
5737 	for (i = 0; i < nitems(iwm_devices); i++) {
5738 		if (iwm_devices[i].device == devid) {
5739 			sc->cfg = iwm_devices[i].cfg;
5740 			return (0);
5741 		}
5742 	}
5743 	device_printf(dev, "unknown adapter type\n");
5744 	return ENXIO;
5745 }
5746 
5747 /* PCI registers */
5748 #define PCI_CFG_RETRY_TIMEOUT	0x041
5749 
5750 static int
5751 iwm_pci_attach(device_t dev)
5752 {
5753 	struct iwm_softc *sc;
5754 	int count, error, rid;
5755 	uint16_t reg;
5756 
5757 	sc = device_get_softc(dev);
5758 
5759 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5760 	 * PCI Tx retries from interfering with C3 CPU state */
5761 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5762 
5763 	/* Enable bus-mastering and hardware bug workaround. */
5764 	pci_enable_busmaster(dev);
5765 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5766 	/* if !MSI */
5767 	if (reg & PCIM_STATUS_INTxSTATE) {
5768 		reg &= ~PCIM_STATUS_INTxSTATE;
5769 	}
5770 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5771 
5772 	rid = PCIR_BAR(0);
5773 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5774 	    RF_ACTIVE);
5775 	if (sc->sc_mem == NULL) {
5776 		device_printf(sc->sc_dev, "can't map mem space\n");
5777 		return (ENXIO);
5778 	}
5779 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5780 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5781 
5782 	/* Install interrupt handler. */
5783 	count = 1;
5784 	rid = 0;
5785 	if (pci_alloc_msi(dev, &count) == 0)
5786 		rid = 1;
5787 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5788 	    (rid != 0 ? 0 : RF_SHAREABLE));
5789 	if (sc->sc_irq == NULL) {
5790 		device_printf(dev, "can't map interrupt\n");
5791 			return (ENXIO);
5792 	}
5793 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5794 	    NULL, iwm_intr, sc, &sc->sc_ih);
5795 	if (sc->sc_ih == NULL) {
5796 		device_printf(dev, "can't establish interrupt");
5797 			return (ENXIO);
5798 	}
5799 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5800 
5801 	return (0);
5802 }
5803 
5804 static void
5805 iwm_pci_detach(device_t dev)
5806 {
5807 	struct iwm_softc *sc = device_get_softc(dev);
5808 
5809 	if (sc->sc_irq != NULL) {
5810 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5811 		bus_release_resource(dev, SYS_RES_IRQ,
5812 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5813 		pci_release_msi(dev);
5814         }
5815 	if (sc->sc_mem != NULL)
5816 		bus_release_resource(dev, SYS_RES_MEMORY,
5817 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5818 }
5819 
5820 
5821 
5822 static int
5823 iwm_attach(device_t dev)
5824 {
5825 	struct iwm_softc *sc = device_get_softc(dev);
5826 	struct ieee80211com *ic = &sc->sc_ic;
5827 	int error;
5828 	int txq_i, i;
5829 
5830 	sc->sc_dev = dev;
5831 	sc->sc_attached = 1;
5832 	IWM_LOCK_INIT(sc);
5833 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5834 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5835 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5836 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5837 
5838 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5839 	if (sc->sc_notif_wait == NULL) {
5840 		device_printf(dev, "failed to init notification wait struct\n");
5841 		goto fail;
5842 	}
5843 
5844 	sc->sf_state = IWM_SF_UNINIT;
5845 
5846 	/* Init phy db */
5847 	sc->sc_phy_db = iwm_phy_db_init(sc);
5848 	if (!sc->sc_phy_db) {
5849 		device_printf(dev, "Cannot init phy_db\n");
5850 		goto fail;
5851 	}
5852 
5853 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5854 	sc->last_ebs_successful = TRUE;
5855 
5856 	/* PCI attach */
5857 	error = iwm_pci_attach(dev);
5858 	if (error != 0)
5859 		goto fail;
5860 
5861 	sc->sc_wantresp = -1;
5862 
5863 	/* Check device type */
5864 	error = iwm_dev_check(dev);
5865 	if (error != 0)
5866 		goto fail;
5867 
5868 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5869 	/*
5870 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5871 	 * changed, and now the revision step also includes bit 0-1 (no more
5872 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5873 	 * in the old format.
5874 	 */
5875 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5876 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5877 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5878 
5879 	if (iwm_prepare_card_hw(sc) != 0) {
5880 		device_printf(dev, "could not initialize hardware\n");
5881 		goto fail;
5882 	}
5883 
5884 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5885 		int ret;
5886 		uint32_t hw_step;
5887 
5888 		/*
5889 		 * In order to recognize C step the driver should read the
5890 		 * chip version id located at the AUX bus MISC address.
5891 		 */
5892 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5893 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5894 		DELAY(2);
5895 
5896 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5897 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5898 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5899 				   25000);
5900 		if (!ret) {
5901 			device_printf(sc->sc_dev,
5902 			    "Failed to wake up the nic\n");
5903 			goto fail;
5904 		}
5905 
5906 		if (iwm_nic_lock(sc)) {
5907 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5908 			hw_step |= IWM_ENABLE_WFPM;
5909 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5910 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5911 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5912 			if (hw_step == 0x3)
5913 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5914 						(IWM_SILICON_C_STEP << 2);
5915 			iwm_nic_unlock(sc);
5916 		} else {
5917 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5918 			goto fail;
5919 		}
5920 	}
5921 
5922 	/* special-case 7265D, it has the same PCI IDs. */
5923 	if (sc->cfg == &iwm7265_cfg &&
5924 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5925 		sc->cfg = &iwm7265d_cfg;
5926 	}
5927 
5928 	/* Allocate DMA memory for firmware transfers. */
5929 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5930 		device_printf(dev, "could not allocate memory for firmware\n");
5931 		goto fail;
5932 	}
5933 
5934 	/* Allocate "Keep Warm" page. */
5935 	if ((error = iwm_alloc_kw(sc)) != 0) {
5936 		device_printf(dev, "could not allocate keep warm page\n");
5937 		goto fail;
5938 	}
5939 
5940 	/* We use ICT interrupts */
5941 	if ((error = iwm_alloc_ict(sc)) != 0) {
5942 		device_printf(dev, "could not allocate ICT table\n");
5943 		goto fail;
5944 	}
5945 
5946 	/* Allocate TX scheduler "rings". */
5947 	if ((error = iwm_alloc_sched(sc)) != 0) {
5948 		device_printf(dev, "could not allocate TX scheduler rings\n");
5949 		goto fail;
5950 	}
5951 
5952 	/* Allocate TX rings */
5953 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5954 		if ((error = iwm_alloc_tx_ring(sc,
5955 		    &sc->txq[txq_i], txq_i)) != 0) {
5956 			device_printf(dev,
5957 			    "could not allocate TX ring %d\n",
5958 			    txq_i);
5959 			goto fail;
5960 		}
5961 	}
5962 
5963 	/* Allocate RX ring. */
5964 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5965 		device_printf(dev, "could not allocate RX ring\n");
5966 		goto fail;
5967 	}
5968 
5969 	/* Clear pending interrupts. */
5970 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5971 
5972 	ic->ic_softc = sc;
5973 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5974 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5975 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5976 
5977 	/* Set device capabilities. */
5978 	ic->ic_caps =
5979 	    IEEE80211_C_STA |
5980 	    IEEE80211_C_WPA |		/* WPA/RSN */
5981 	    IEEE80211_C_WME |
5982 	    IEEE80211_C_PMGT |
5983 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5984 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5985 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5986 	    ;
5987 	/* Advertise full-offload scanning */
5988 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
5989 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5990 		sc->sc_phyctxt[i].id = i;
5991 		sc->sc_phyctxt[i].color = 0;
5992 		sc->sc_phyctxt[i].ref = 0;
5993 		sc->sc_phyctxt[i].channel = NULL;
5994 	}
5995 
5996 	/* Default noise floor */
5997 	sc->sc_noise = -96;
5998 
5999 	/* Max RSSI */
6000 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6001 
6002 	sc->sc_preinit_hook.ich_func = iwm_preinit;
6003 	sc->sc_preinit_hook.ich_arg = sc;
6004 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6005 		device_printf(dev, "config_intrhook_establish failed\n");
6006 		goto fail;
6007 	}
6008 
6009 #ifdef IWM_DEBUG
6010 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6011 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6012 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6013 #endif
6014 
6015 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6016 	    "<-%s\n", __func__);
6017 
6018 	return 0;
6019 
6020 	/* Free allocated memory if something failed during attachment. */
6021 fail:
6022 	iwm_detach_local(sc, 0);
6023 
6024 	return ENXIO;
6025 }
6026 
6027 static int
6028 iwm_is_valid_ether_addr(uint8_t *addr)
6029 {
6030 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6031 
6032 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6033 		return (FALSE);
6034 
6035 	return (TRUE);
6036 }
6037 
6038 static int
6039 iwm_wme_update(struct ieee80211com *ic)
6040 {
6041 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6042 	struct iwm_softc *sc = ic->ic_softc;
6043 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6044 	struct iwm_vap *ivp = IWM_VAP(vap);
6045 	struct iwm_node *in;
6046 	struct wmeParams tmp[WME_NUM_AC];
6047 	int aci, error;
6048 
6049 	if (vap == NULL)
6050 		return (0);
6051 
6052 	IEEE80211_LOCK(ic);
6053 	for (aci = 0; aci < WME_NUM_AC; aci++)
6054 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6055 	IEEE80211_UNLOCK(ic);
6056 
6057 	IWM_LOCK(sc);
6058 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6059 		const struct wmeParams *ac = &tmp[aci];
6060 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6061 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6062 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6063 		ivp->queue_params[aci].edca_txop =
6064 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6065 	}
6066 	ivp->have_wme = TRUE;
6067 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6068 		in = IWM_NODE(vap->iv_bss);
6069 		if (in->in_assoc) {
6070 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6071 				device_printf(sc->sc_dev,
6072 				    "%s: failed to update MAC\n", __func__);
6073 			}
6074 		}
6075 	}
6076 	IWM_UNLOCK(sc);
6077 
6078 	return (0);
6079 #undef IWM_EXP2
6080 }
6081 
6082 static void
6083 iwm_preinit(void *arg)
6084 {
6085 	struct iwm_softc *sc = arg;
6086 	device_t dev = sc->sc_dev;
6087 	struct ieee80211com *ic = &sc->sc_ic;
6088 	int error;
6089 
6090 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6091 	    "->%s\n", __func__);
6092 
6093 	IWM_LOCK(sc);
6094 	if ((error = iwm_start_hw(sc)) != 0) {
6095 		device_printf(dev, "could not initialize hardware\n");
6096 		IWM_UNLOCK(sc);
6097 		goto fail;
6098 	}
6099 
6100 	error = iwm_run_init_mvm_ucode(sc, 1);
6101 	iwm_stop_device(sc);
6102 	if (error) {
6103 		IWM_UNLOCK(sc);
6104 		goto fail;
6105 	}
6106 	device_printf(dev,
6107 	    "hw rev 0x%x, fw ver %s, address %s\n",
6108 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6109 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6110 
6111 	/* not all hardware can do 5GHz band */
6112 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6113 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6114 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6115 	IWM_UNLOCK(sc);
6116 
6117 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6118 	    ic->ic_channels);
6119 
6120 	/*
6121 	 * At this point we've committed - if we fail to do setup,
6122 	 * we now also have to tear down the net80211 state.
6123 	 */
6124 	ieee80211_ifattach(ic);
6125 	ic->ic_vap_create = iwm_vap_create;
6126 	ic->ic_vap_delete = iwm_vap_delete;
6127 	ic->ic_raw_xmit = iwm_raw_xmit;
6128 	ic->ic_node_alloc = iwm_node_alloc;
6129 	ic->ic_scan_start = iwm_scan_start;
6130 	ic->ic_scan_end = iwm_scan_end;
6131 	ic->ic_update_mcast = iwm_update_mcast;
6132 	ic->ic_getradiocaps = iwm_init_channel_map;
6133 	ic->ic_set_channel = iwm_set_channel;
6134 	ic->ic_scan_curchan = iwm_scan_curchan;
6135 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6136 	ic->ic_wme.wme_update = iwm_wme_update;
6137 	ic->ic_parent = iwm_parent;
6138 	ic->ic_transmit = iwm_transmit;
6139 	iwm_radiotap_attach(sc);
6140 	if (bootverbose)
6141 		ieee80211_announce(ic);
6142 
6143 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6144 	    "<-%s\n", __func__);
6145 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6146 
6147 	return;
6148 fail:
6149 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6150 	iwm_detach_local(sc, 0);
6151 }
6152 
6153 /*
6154  * Attach the interface to 802.11 radiotap.
6155  */
6156 static void
6157 iwm_radiotap_attach(struct iwm_softc *sc)
6158 {
6159         struct ieee80211com *ic = &sc->sc_ic;
6160 
6161 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6162 	    "->%s begin\n", __func__);
6163         ieee80211_radiotap_attach(ic,
6164             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6165                 IWM_TX_RADIOTAP_PRESENT,
6166             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6167                 IWM_RX_RADIOTAP_PRESENT);
6168 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6169 	    "->%s end\n", __func__);
6170 }
6171 
6172 static struct ieee80211vap *
6173 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6174     enum ieee80211_opmode opmode, int flags,
6175     const uint8_t bssid[IEEE80211_ADDR_LEN],
6176     const uint8_t mac[IEEE80211_ADDR_LEN])
6177 {
6178 	struct iwm_vap *ivp;
6179 	struct ieee80211vap *vap;
6180 
6181 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6182 		return NULL;
6183 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6184 	vap = &ivp->iv_vap;
6185 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6186 	vap->iv_bmissthreshold = 10;            /* override default */
6187 	/* Override with driver methods. */
6188 	ivp->iv_newstate = vap->iv_newstate;
6189 	vap->iv_newstate = iwm_newstate;
6190 
6191 	ivp->id = IWM_DEFAULT_MACID;
6192 	ivp->color = IWM_DEFAULT_COLOR;
6193 
6194 	ivp->have_wme = FALSE;
6195 	ivp->ps_disabled = FALSE;
6196 
6197 	ieee80211_ratectl_init(vap);
6198 	/* Complete setup. */
6199 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6200 	    mac);
6201 	ic->ic_opmode = opmode;
6202 
6203 	return vap;
6204 }
6205 
6206 static void
6207 iwm_vap_delete(struct ieee80211vap *vap)
6208 {
6209 	struct iwm_vap *ivp = IWM_VAP(vap);
6210 
6211 	ieee80211_ratectl_deinit(vap);
6212 	ieee80211_vap_detach(vap);
6213 	free(ivp, M_80211_VAP);
6214 }
6215 
6216 static void
6217 iwm_scan_start(struct ieee80211com *ic)
6218 {
6219 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6220 	struct iwm_softc *sc = ic->ic_softc;
6221 	int error;
6222 
6223 	IWM_LOCK(sc);
6224 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6225 		/* This should not be possible */
6226 		device_printf(sc->sc_dev,
6227 		    "%s: Previous scan not completed yet\n", __func__);
6228 	}
6229 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6230 		error = iwm_mvm_umac_scan(sc);
6231 	else
6232 		error = iwm_mvm_lmac_scan(sc);
6233 	if (error != 0) {
6234 		device_printf(sc->sc_dev, "could not initiate scan\n");
6235 		IWM_UNLOCK(sc);
6236 		ieee80211_cancel_scan(vap);
6237 	} else {
6238 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6239 		iwm_led_blink_start(sc);
6240 		IWM_UNLOCK(sc);
6241 	}
6242 }
6243 
6244 static void
6245 iwm_scan_end(struct ieee80211com *ic)
6246 {
6247 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6248 	struct iwm_softc *sc = ic->ic_softc;
6249 
6250 	IWM_LOCK(sc);
6251 	iwm_led_blink_stop(sc);
6252 	if (vap->iv_state == IEEE80211_S_RUN)
6253 		iwm_mvm_led_enable(sc);
6254 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6255 		/*
6256 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6257 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6258 		 * taskqueue.
6259 		 */
6260 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6261 		iwm_mvm_scan_stop_wait(sc);
6262 	}
6263 	IWM_UNLOCK(sc);
6264 
6265 	/*
6266 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6267 	 * This is to make sure that it won't call ieee80211_scan_done
6268 	 * when we have already started the next scan.
6269 	 */
6270 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6271 }
6272 
6273 static void
6274 iwm_update_mcast(struct ieee80211com *ic)
6275 {
6276 }
6277 
6278 static void
6279 iwm_set_channel(struct ieee80211com *ic)
6280 {
6281 }
6282 
6283 static void
6284 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6285 {
6286 }
6287 
6288 static void
6289 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6290 {
6291 	return;
6292 }
6293 
6294 void
6295 iwm_init_task(void *arg1)
6296 {
6297 	struct iwm_softc *sc = arg1;
6298 
6299 	IWM_LOCK(sc);
6300 	while (sc->sc_flags & IWM_FLAG_BUSY)
6301 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6302 	sc->sc_flags |= IWM_FLAG_BUSY;
6303 	iwm_stop(sc);
6304 	if (sc->sc_ic.ic_nrunning > 0)
6305 		iwm_init(sc);
6306 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6307 	wakeup(&sc->sc_flags);
6308 	IWM_UNLOCK(sc);
6309 }
6310 
6311 static int
6312 iwm_resume(device_t dev)
6313 {
6314 	struct iwm_softc *sc = device_get_softc(dev);
6315 	int do_reinit = 0;
6316 
6317 	/*
6318 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6319 	 * PCI Tx retries from interfering with C3 CPU state.
6320 	 */
6321 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6322 	iwm_init_task(device_get_softc(dev));
6323 
6324 	IWM_LOCK(sc);
6325 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6326 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6327 		do_reinit = 1;
6328 	}
6329 	IWM_UNLOCK(sc);
6330 
6331 	if (do_reinit)
6332 		ieee80211_resume_all(&sc->sc_ic);
6333 
6334 	return 0;
6335 }
6336 
6337 static int
6338 iwm_suspend(device_t dev)
6339 {
6340 	int do_stop = 0;
6341 	struct iwm_softc *sc = device_get_softc(dev);
6342 
6343 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6344 
6345 	ieee80211_suspend_all(&sc->sc_ic);
6346 
6347 	if (do_stop) {
6348 		IWM_LOCK(sc);
6349 		iwm_stop(sc);
6350 		sc->sc_flags |= IWM_FLAG_SCANNING;
6351 		IWM_UNLOCK(sc);
6352 	}
6353 
6354 	return (0);
6355 }
6356 
6357 static int
6358 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6359 {
6360 	struct iwm_fw_info *fw = &sc->sc_fw;
6361 	device_t dev = sc->sc_dev;
6362 	int i;
6363 
6364 	if (!sc->sc_attached)
6365 		return 0;
6366 	sc->sc_attached = 0;
6367 
6368 	if (do_net80211)
6369 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6370 
6371 	callout_drain(&sc->sc_led_blink_to);
6372 	callout_drain(&sc->sc_watchdog_to);
6373 	iwm_stop_device(sc);
6374 	if (do_net80211) {
6375 		ieee80211_ifdetach(&sc->sc_ic);
6376 	}
6377 
6378 	iwm_phy_db_free(sc->sc_phy_db);
6379 	sc->sc_phy_db = NULL;
6380 
6381 	iwm_free_nvm_data(sc->nvm_data);
6382 
6383 	/* Free descriptor rings */
6384 	iwm_free_rx_ring(sc, &sc->rxq);
6385 	for (i = 0; i < nitems(sc->txq); i++)
6386 		iwm_free_tx_ring(sc, &sc->txq[i]);
6387 
6388 	/* Free firmware */
6389 	if (fw->fw_fp != NULL)
6390 		iwm_fw_info_free(fw);
6391 
6392 	/* Free scheduler */
6393 	iwm_dma_contig_free(&sc->sched_dma);
6394 	iwm_dma_contig_free(&sc->ict_dma);
6395 	iwm_dma_contig_free(&sc->kw_dma);
6396 	iwm_dma_contig_free(&sc->fw_dma);
6397 
6398 	iwm_free_fw_paging(sc);
6399 
6400 	/* Finished with the hardware - detach things */
6401 	iwm_pci_detach(dev);
6402 
6403 	if (sc->sc_notif_wait != NULL) {
6404 		iwm_notification_wait_free(sc->sc_notif_wait);
6405 		sc->sc_notif_wait = NULL;
6406 	}
6407 
6408 	mbufq_drain(&sc->sc_snd);
6409 	IWM_LOCK_DESTROY(sc);
6410 
6411 	return (0);
6412 }
6413 
6414 static int
6415 iwm_detach(device_t dev)
6416 {
6417 	struct iwm_softc *sc = device_get_softc(dev);
6418 
6419 	return (iwm_detach_local(sc, 1));
6420 }
6421 
6422 static device_method_t iwm_pci_methods[] = {
6423         /* Device interface */
6424         DEVMETHOD(device_probe,         iwm_probe),
6425         DEVMETHOD(device_attach,        iwm_attach),
6426         DEVMETHOD(device_detach,        iwm_detach),
6427         DEVMETHOD(device_suspend,       iwm_suspend),
6428         DEVMETHOD(device_resume,        iwm_resume),
6429 
6430         DEVMETHOD_END
6431 };
6432 
6433 static driver_t iwm_pci_driver = {
6434         "iwm",
6435         iwm_pci_methods,
6436         sizeof (struct iwm_softc)
6437 };
6438 
6439 static devclass_t iwm_devclass;
6440 
6441 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6442 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6443 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6444 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6445