/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
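/*
 * mtodoff() is mtod() with a byte offset: it yields a pointer of type 't'
 * into the mbuf payload 'off' bytes in, so callers can write (a usage
 * sketch) "p = mtodoff(m, struct foo *, off)" instead of casting
 * "mtod(m, uint8_t *) + off" by hand.
 */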

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
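/*
 * The 'rate' field above is in 0.5 Mb/s units, net80211's usual encoding:
 * { 2, IWM_RATE_1M_PLCP } is 1 Mb/s CCK and { 108, IWM_RATE_54M_PLCP } is
 * 54 Mb/s OFDM.  Entries 0-3 are the CCK rates and 4-11 the OFDM rates,
 * which is exactly what IWM_RIDX_CCK/IWM_RIDX_OFDM and the predicates
 * above encode.
 */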

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
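/* Timeouts are in kernel ticks: one second to see the ALIVE notification
 * from the firmware, two seconds for calibration results. */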

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
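/* LAR (location-aware regulatory) handling can be turned off by setting
 * hw.iwm.lar.disable=1 in loader.conf(5). */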

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}
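/*
 * In both helpers above each TLV carries one 32-bit word of a larger
 * bitmap: a record with api_index 1 and bit 3 set marks feature
 * 1 * 32 + 3 = 35 in the enabled_api/enabled_capa sets.
 */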

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		error = ENOENT;	/* don't report success without firmware */
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */
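	/*
	 * The image is an iwm_tlv_ucode_header followed by a stream of
	 * (type, length, data) records; each record's payload is padded
	 * to a 4-byte boundary, which the roundup2() below steps over.
	 */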

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

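/*
 * The ICT is a DMA-backed table into which the device writes interrupt
 * cause vectors, so the interrupt handler can read pending causes from
 * host memory instead of issuing a CSR read per interrupt.
 */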
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}
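/*
 * The RFH multi-queue path above is taken on the newer devices that set
 * mqrx_supported (the 9000 family); everything older uses the legacy
 * single-queue FH registers below.
 */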
1464 
1465 static int
1466 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1467 {
1468 
1469 	/* Stop Rx DMA */
1470 	iwm_pcie_rx_stop(sc);
1471 
1472 	if (!iwm_nic_lock(sc))
1473 		return EBUSY;
1474 
1475 	/* reset and flush pointers */
1476 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1477 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1478 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1479 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1480 
1481 	/* Set physical address of RX ring (256-byte aligned). */
1482 	IWM_WRITE(sc,
1483 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1484 	    sc->rxq.free_desc_dma.paddr >> 8);
1485 
1486 	/* Set physical address of RX status (16-byte aligned). */
1487 	IWM_WRITE(sc,
1488 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1489 
1490 	/* Enable Rx DMA
1491 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1492 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1493 	 *      the credit mechanism in 5000 HW RX FIFO
1494 	 * Direct rx interrupts to hosts
1495 	 * Rx buffer size 4 or 8k or 12k
1496 	 * RB timeout 0x10
1497 	 * 256 RBDs
1498 	 */
1499 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1500 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1501 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1502 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1503 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1504 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1505 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1506 
1507 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1508 
1509 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1510 	if (sc->cfg->host_interrupt_operation_mode)
1511 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1512 
1513 	iwm_nic_unlock(sc);
1514 
1515 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1516 
1517 	return 0;
1518 }
1519 
1520 static int
1521 iwm_nic_rx_init(struct iwm_softc *sc)
1522 {
1523 	if (sc->cfg->mqrx_supported)
1524 		return iwm_nic_rx_mq_init(sc);
1525 	else
1526 		return iwm_nic_rx_legacy_init(sc);
1527 }
1528 
1529 static int
1530 iwm_nic_tx_init(struct iwm_softc *sc)
1531 {
1532 	int qid;
1533 
1534 	if (!iwm_nic_lock(sc))
1535 		return EBUSY;
1536 
1537 	/* Deactivate TX scheduler. */
1538 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1539 
1540 	/* Set physical address of "keep warm" page (16-byte aligned). */
1541 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1542 
1543 	/* Initialize TX rings. */
1544 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1545 		struct iwm_tx_ring *txq = &sc->txq[qid];
1546 
1547 		/* Set physical address of TX ring (256-byte aligned). */
1548 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1549 		    txq->desc_dma.paddr >> 8);
1550 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1551 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1552 		    __func__,
1553 		    qid, txq->desc,
1554 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1555 	}
1556 
1557 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
1558 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
1559 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
1560 
1561 	iwm_nic_unlock(sc);
1562 
1563 	return 0;
1564 }
1565 
1566 static int
1567 iwm_nic_init(struct iwm_softc *sc)
1568 {
1569 	int error;
1570 
1571 	iwm_apm_init(sc);
1572 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1573 		iwm_set_pwr(sc);
1574 
1575 	iwm_nic_config(sc);
1576 
1577 	if ((error = iwm_nic_rx_init(sc)) != 0)
1578 		return error;
1579 
1580 	/*
1581 	 * Ditto for TX, from iwn
1582 	 */
1583 	if ((error = iwm_nic_tx_init(sc)) != 0)
1584 		return error;
1585 
1586 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1587 	    "%s: shadow registers enabled\n", __func__);
1588 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1589 
1590 	return 0;
1591 }
1592 
1593 int
1594 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1595 {
1596 	int qmsk;
1597 
1598 	qmsk = 1 << qid;
1599 
1600 	if (!iwm_nic_lock(sc)) {
1601 		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1602 		    __func__, qid);
1603 		return EBUSY;
1604 	}
1605 
1606 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1607 
1608 	if (qid == IWM_CMD_QUEUE) {
1609 		/* Disable the scheduler. */
1610 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);
1611 
1612 		/* Stop the TX queue prior to configuration. */
1613 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1614 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1615 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1616 
1617 		iwm_nic_unlock(sc);
1618 
1619 		/* Disable aggregations for this queue. */
1620 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);
1621 
1622 		if (!iwm_nic_lock(sc)) {
1623 			device_printf(sc->sc_dev,
1624 			    "%s: cannot enable txq %d\n", __func__, qid);
1625 			return EBUSY;
1626 		}
1627 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1628 		iwm_nic_unlock(sc);
1629 
1630 		iwm_write_mem32(sc,
1631 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1632 		/* Set scheduler window size and frame limit. */
1633 		iwm_write_mem32(sc,
1634 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1635 		    sizeof(uint32_t),
1636 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1637 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1638 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1639 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1640 
1641 		if (!iwm_nic_lock(sc)) {
1642 			device_printf(sc->sc_dev,
1643 			    "%s: cannot enable txq %d\n", __func__, qid);
1644 			return EBUSY;
1645 		}
1646 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1647 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1648 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1649 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1650 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1651 
1652 		/* Enable the scheduler for this queue. */
1653 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
1654 	} else {
1655 		struct iwm_scd_txq_cfg_cmd cmd;
1656 		int error;
1657 
1658 		iwm_nic_unlock(sc);
1659 
1660 		memset(&cmd, 0, sizeof(cmd));
1661 		cmd.scd_queue = qid;
1662 		cmd.enable = 1;
1663 		cmd.sta_id = sta_id;
1664 		cmd.tx_fifo = fifo;
1665 		cmd.aggregate = 0;
1666 		cmd.window = IWM_FRAME_LIMIT;
1667 
1668 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1669 		    sizeof(cmd), &cmd);
1670 		if (error) {
1671 			device_printf(sc->sc_dev,
1672 			    "cannot enable txq %d\n", qid);
1673 			return error;
1674 		}
1675 
1676 		if (!iwm_nic_lock(sc))
1677 			return EBUSY;
1678 	}
1679 
1680 	iwm_nic_unlock(sc);
1681 
1682 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1683 	    __func__, qid, fifo);
1684 
1685 	return 0;
1686 }
1687 
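/*
 * Post-ALIVE transport setup: reset the ICT table, sanity-check the
 * scheduler SRAM base address reported by the firmware, clear the
 * scheduler's context/translation memory, point the scheduler at our
 * DMA memory, then enable the command queue and the FH TX DMA channels.
 */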
1688 static int
1689 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1690 {
1691 	int error, chnl;
1692 
1693 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1694 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1695 
1696 	if (!iwm_nic_lock(sc))
1697 		return EBUSY;
1698 
1699 	iwm_ict_reset(sc);
1700 
1701 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1702 	if (scd_base_addr != 0 &&
1703 	    scd_base_addr != sc->scd_base_addr) {
1704 		device_printf(sc->sc_dev,
1705 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1706 		    __func__, scd_base_addr, sc->scd_base_addr);
1707 	}
1708 
1709 	iwm_nic_unlock(sc);
1710 
1711 	/* reset context data, TX status and translation data */
1712 	error = iwm_write_mem(sc,
1713 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1714 	    NULL, clear_dwords);
1715 	if (error)
1716 		return EBUSY;
1717 
1718 	if (!iwm_nic_lock(sc))
1719 		return EBUSY;
1720 
1721 	/* Set physical address of TX scheduler rings (1KB aligned). */
1722 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1723 
1724 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
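	/* Disable scheduler chain extension (write 0 to IWM_SCD_CHAINEXT_EN). */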
1725 
1726 	iwm_nic_unlock(sc);
1727 
1728 	/* enable command channel */
1729 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1730 	if (error)
1731 		return error;
1732 
1733 	if (!iwm_nic_lock(sc))
1734 		return EBUSY;
1735 
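	/* Activate TX FIFOs 0-7 in the scheduler. */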
1736 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1737 
1738 	/* Enable DMA channels. */
1739 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1740 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1741 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1742 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1743 	}
1744 
1745 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1746 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1747 
1748 	iwm_nic_unlock(sc);
1749 
1750 	/* Enable L1-Active */
1751 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1752 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1753 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1754 	}
1755 
1756 	return error;
1757 }
1758 
1759 /*
1760  * NVM read access and content parsing.  We do not support
1761  * external NVM or writing NVM.
1762  * iwlwifi/mvm/nvm.c
1763  */
1764 
1765 /* Default NVM chunk size (in bytes) to read per NVM access command */
1766 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1767 
1768 #define IWM_NVM_WRITE_OPCODE 1
1769 #define IWM_NVM_READ_OPCODE 0
1770 
1771 /* load nvm chunk response */
1772 enum {
1773 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1774 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1775 };
1776 
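/*
 * Read one chunk of an NVM section via an IWM_NVM_ACCESS_CMD and copy
 * the response payload into 'data' at 'offset'.  On success, '*len' is
 * set to the number of bytes actually read; a short read signals the
 * end of the section to the caller.
 */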
1777 static int
1778 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1779 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1780 {
1781 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1782 		.offset = htole16(offset),
1783 		.length = htole16(length),
1784 		.type = htole16(section),
1785 		.op_code = IWM_NVM_READ_OPCODE,
1786 	};
1787 	struct iwm_nvm_access_resp *nvm_resp;
1788 	struct iwm_rx_packet *pkt;
1789 	struct iwm_host_cmd cmd = {
1790 		.id = IWM_NVM_ACCESS_CMD,
1791 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1792 		.data = { &nvm_access_cmd, },
1793 	};
1794 	int ret, bytes_read, offset_read;
1795 	uint8_t *resp_data;
1796 
1797 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1798 
1799 	ret = iwm_send_cmd(sc, &cmd);
1800 	if (ret) {
1801 		device_printf(sc->sc_dev,
1802 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1803 		return ret;
1804 	}
1805 
1806 	pkt = cmd.resp_pkt;
1807 
1808 	/* Extract NVM response */
1809 	nvm_resp = (void *)pkt->data;
1810 	ret = le16toh(nvm_resp->status);
1811 	bytes_read = le16toh(nvm_resp->length);
1812 	offset_read = le16toh(nvm_resp->offset);
1813 	resp_data = nvm_resp->data;
1814 	if (ret) {
1815 		if ((offset != 0) &&
1816 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1817 			/*
1818 			 * Meaning of NOT_VALID_ADDRESS: the driver tried
1819 			 * to read a chunk at an address that is a multiple
1820 			 * of 2K and got an error because that address is
1821 			 * empty.  Meaning of (offset != 0): the driver has
1822 			 * already read valid data from another chunk, so
1823 			 * this case is not an error.
1824 			 */
1825 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1826 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1827 				    offset);
1828 			*len = 0;
1829 			ret = 0;
1830 		} else {
1831 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1832 				    "NVM access command failed with status %d\n", ret);
1833 			ret = EIO;
1834 		}
1835 		goto exit;
1836 	}
1837 
1838 	if (offset_read != offset) {
1839 		device_printf(sc->sc_dev,
1840 		    "NVM ACCESS response with invalid offset %d\n",
1841 		    offset_read);
1842 		ret = EINVAL;
1843 		goto exit;
1844 	}
1845 
1846 	if (bytes_read > length) {
1847 		device_printf(sc->sc_dev,
1848 		    "NVM ACCESS response with too much data "
1849 		    "(%d bytes requested, %d bytes received)\n",
1850 		    length, bytes_read);
1851 		ret = EINVAL;
1852 		goto exit;
1853 	}
1854 
1855 	/* Copy the NVM response data into the caller's buffer. */
1856 	memcpy(data + offset, resp_data, bytes_read);
1857 	*len = bytes_read;
1858 
1859  exit:
1860 	iwm_free_resp(sc, &cmd);
1861 	return ret;
1862 }
1863 
1864 /*
1865  * Reads an NVM section completely.
1866  * NICs prior to the 7000 family don't have a real NVM, but just read
1867  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1868  * by the uCode, we need to manually check in this case that we don't
1869  * overflow and try to read more than the EEPROM size.
1870  * For 7000 family NICs, we supply the maximal size we can read, and
1871  * the uCode fills the response with as much data as it can
1872  * without overflowing, so no check is needed.
1873  */
1874 static int
1875 iwm_nvm_read_section(struct iwm_softc *sc,
1876 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1877 {
1878 	uint16_t seglen, length, offset = 0;
1879 	int ret;
1880 
1881 	/* Set nvm section read length */
1882 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1883 
1884 	seglen = length;
1885 
1886 	/* Read the NVM until exhausted (reading less than requested) */
1887 	while (seglen == length) {
1888 		/* Make sure we don't read past the end of the EEPROM buffer. */
1889 		if ((size_read + offset + length) >
1890 		    sc->cfg->eeprom_size) {
1891 			device_printf(sc->sc_dev,
1892 			    "EEPROM size is too small for NVM\n");
1893 			return ENOBUFS;
1894 		}
1895 
1896 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1897 		if (ret) {
1898 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1899 				    "Cannot read NVM from section %d offset %d, length %d\n",
1900 				    section, offset, length);
1901 			return ret;
1902 		}
1903 		offset += seglen;
1904 	}
1905 
1906 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1907 		    "NVM section %d read completed\n", section);
1908 	*len = offset;
1909 	return 0;
1910 }
1911 
1912 /*
1913  * BEGIN IWM_NVM_PARSE
1914  */
1915 
1916 /* iwlwifi/iwl-nvm-parse.c */
1917 
1918 /* NVM offsets (in words) definitions */
1919 enum iwm_nvm_offsets {
1920 	/* NVM HW-Section offset (in words) definitions */
1921 	IWM_HW_ADDR = 0x15,
1922 
1923 /* NVM SW-Section offset (in words) definitions */
1924 	IWM_NVM_SW_SECTION = 0x1C0,
1925 	IWM_NVM_VERSION = 0,
1926 	IWM_RADIO_CFG = 1,
1927 	IWM_SKU = 2,
1928 	IWM_N_HW_ADDRS = 3,
1929 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1930 
1931 /* NVM calibration section offset (in words) definitions */
1932 	IWM_NVM_CALIB_SECTION = 0x2B8,
1933 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1934 };
1935 
1936 enum iwm_8000_nvm_offsets {
1937 	/* NVM HW-Section offset (in words) definitions */
1938 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1939 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1940 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1941 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1942 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1943 
1944 	/* NVM SW-Section offset (in words) definitions */
1945 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1946 	IWM_NVM_VERSION_8000 = 0,
1947 	IWM_RADIO_CFG_8000 = 0,
1948 	IWM_SKU_8000 = 2,
1949 	IWM_N_HW_ADDRS_8000 = 3,
1950 
1951 	/* NVM REGULATORY -Section offset (in words) definitions */
1952 	IWM_NVM_CHANNELS_8000 = 0,
1953 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1954 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1955 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1956 
1957 	/* NVM calibration section offset (in words) definitions */
1958 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1959 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1960 };
1961 
1962 /* SKU Capabilities (actual values from NVM definition) */
1963 enum nvm_sku_bits {
1964 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1965 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1966 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1967 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1968 };
1969 
1970 /* radio config bits (actual values from NVM definition) */
1971 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1972 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1973 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1974 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1975 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1976 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1977 
1978 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1979 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1980 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1981 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1982 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1983 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
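/*
 * Example decode with a hypothetical 8000-family radio_cfg word of
 * 0x12345678: FLAVOR = 0x8, DASH = 0x7, STEP = 0x6, TYPE = 0x345,
 * TX_ANT = 0x2, RX_ANT = 0x1.
 */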
1984 
1985 /**
1986  * enum iwm_nvm_channel_flags - channel flags in NVM
1987  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1988  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1989  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1990  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1991  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1992  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1993  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1994  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1995  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1996  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1997  */
1998 enum iwm_nvm_channel_flags {
1999 	IWM_NVM_CHANNEL_VALID = (1 << 0),
2000 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2001 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2002 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2003 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2004 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2005 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2006 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2007 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2008 };
2009 
2010 /*
2011  * Translate EEPROM flags to net80211.
2012  */
2013 static uint32_t
2014 iwm_eeprom_channel_flags(uint16_t ch_flags)
2015 {
2016 	uint32_t nflags;
2017 
2018 	nflags = 0;
2019 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2020 		nflags |= IEEE80211_CHAN_PASSIVE;
2021 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2022 		nflags |= IEEE80211_CHAN_NOADHOC;
2023 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2024 		nflags |= IEEE80211_CHAN_DFS;
2025 		/* Disallow adhoc on radar channels, just in case. */
2026 		nflags |= IEEE80211_CHAN_NOADHOC;
2027 	}
2028 
2029 	return (nflags);
2030 }
2031 
2032 static void
2033 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2034     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2035     const uint8_t bands[])
2036 {
2037 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2038 	uint32_t nflags;
2039 	uint16_t ch_flags;
2040 	uint8_t ieee;
2041 	int error;
2042 
2043 	for (; ch_idx < ch_num; ch_idx++) {
2044 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2045 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2046 			ieee = iwm_nvm_channels[ch_idx];
2047 		else
2048 			ieee = iwm_nvm_channels_8000[ch_idx];
2049 
2050 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2051 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2052 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2053 			    ieee, ch_flags,
2054 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2055 			    "5.2" : "2.4");
2056 			continue;
2057 		}
2058 
2059 		nflags = iwm_eeprom_channel_flags(ch_flags);
2060 		error = ieee80211_add_channel(chans, maxchans, nchans,
2061 		    ieee, 0, 0, nflags, bands);
2062 		if (error != 0)
2063 			break;
2064 
2065 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2066 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2067 		    ieee, ch_flags,
2068 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2069 		    "5.2" : "2.4");
2070 	}
2071 }
2072 
2073 static void
2074 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2075     struct ieee80211_channel chans[])
2076 {
2077 	struct iwm_softc *sc = ic->ic_softc;
2078 	struct iwm_nvm_data *data = sc->nvm_data;
2079 	uint8_t bands[IEEE80211_MODE_BYTES];
2080 	size_t ch_num;
2081 
2082 	memset(bands, 0, sizeof(bands));
2083 	/* 1-13: 11b/g channels. */
2084 	setbit(bands, IEEE80211_MODE_11B);
2085 	setbit(bands, IEEE80211_MODE_11G);
2086 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2087 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2088 
2089 	/* 14: 11b channel only. */
2090 	clrbit(bands, IEEE80211_MODE_11G);
2091 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2092 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2093 
2094 	if (data->sku_cap_band_52GHz_enable) {
2095 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2096 			ch_num = nitems(iwm_nvm_channels);
2097 		else
2098 			ch_num = nitems(iwm_nvm_channels_8000);
2099 		memset(bands, 0, sizeof(bands));
2100 		setbit(bands, IEEE80211_MODE_11A);
2101 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2102 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2103 	}
2104 }
2105 
2106 static void
2107 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2108 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2109 {
2110 	const uint8_t *hw_addr;
2111 
2112 	if (mac_override) {
2113 		static const uint8_t reserved_mac[] = {
2114 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2115 		};
2116 
2117 		hw_addr = (const uint8_t *)(mac_override +
2118 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2119 
2120 		/*
2121 		 * Store the MAC address from the MAO (MAC address override)
2122 		 * section.  No byte swapping is required in the MAO section.
2123 		 */
2124 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2125 
2126 		/*
2127 		 * Force the use of the OTP MAC address in case of reserved MAC
2128 		 * address in the NVM, or if address is given but invalid.
2129 		 */
2130 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2131 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2132 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2133 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2134 			return;
2135 
2136 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2137 		    "%s: mac address from nvm override section invalid\n",
2138 		    __func__);
2139 	}
2140 
2141 	if (nvm_hw) {
2142 		/* read the mac address from WFMP registers */
2143 		uint32_t mac_addr0 =
2144 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2145 		uint32_t mac_addr1 =
2146 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2147 
2148 		hw_addr = (const uint8_t *)&mac_addr0;
2149 		data->hw_addr[0] = hw_addr[3];
2150 		data->hw_addr[1] = hw_addr[2];
2151 		data->hw_addr[2] = hw_addr[1];
2152 		data->hw_addr[3] = hw_addr[0];
2153 
2154 		hw_addr = (const uint8_t *)&mac_addr1;
2155 		data->hw_addr[4] = hw_addr[1];
2156 		data->hw_addr[5] = hw_addr[0];
2157 
2158 		return;
2159 	}
2160 
2161 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2162 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2163 }
2164 
2165 static int
2166 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2167 	    const uint16_t *phy_sku)
2168 {
2169 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2170 		return le16_to_cpup(nvm_sw + IWM_SKU);
2171 
2172 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2173 }
2174 
2175 static int
2176 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2177 {
2178 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2179 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2180 	else
2181 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2182 						IWM_NVM_VERSION_8000));
2183 }
2184 
2185 static int
2186 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2187 		  const uint16_t *phy_sku)
2188 {
2189 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2190 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2191 
2192 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2193 }
2194 
2195 static int
2196 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2197 {
2198 	int n_hw_addr;
2199 
2200 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2201 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2202 
2203 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2204 
2205 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2206 }
2207 
2208 static void
2209 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2210 		  uint32_t radio_cfg)
2211 {
2212 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2213 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2214 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2215 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2216 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2217 		return;
2218 	}
2219 
2220 	/* set the radio configuration for family 8000 */
2221 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2222 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2223 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2224 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2225 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2226 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2227 }
2228 
2229 static int
2230 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2231 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2232 {
2233 #ifdef notyet /* for FAMILY 9000 */
2234 	if (cfg->mac_addr_from_csr) {
2235 		iwm_set_hw_address_from_csr(sc, data);
2236 	} else
2237 #endif
2238 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2239 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2240 
2241 		/* Little-endian 16-bit words: bytes arrive in the order 2-1-4-3-6-5 */
2242 		data->hw_addr[0] = hw_addr[1];
2243 		data->hw_addr[1] = hw_addr[0];
2244 		data->hw_addr[2] = hw_addr[3];
2245 		data->hw_addr[3] = hw_addr[2];
2246 		data->hw_addr[4] = hw_addr[5];
2247 		data->hw_addr[5] = hw_addr[4];
2248 	} else {
2249 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2250 	}
2251 
2252 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2253 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2254 		return EINVAL;
2255 	}
2256 
2257 	return 0;
2258 }
2259 
2260 static struct iwm_nvm_data *
2261 iwm_parse_nvm_data(struct iwm_softc *sc,
2262 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2263 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2264 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2265 {
2266 	struct iwm_nvm_data *data;
2267 	uint32_t sku, radio_cfg;
2268 	uint16_t lar_config;
2269 
2270 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2271 		data = malloc(sizeof(*data) +
2272 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2273 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2274 	} else {
2275 		data = malloc(sizeof(*data) +
2276 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2277 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2278 	}
2279 	if (!data)
2280 		return NULL;
2281 
2282 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2283 
2284 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2285 	iwm_set_radio_cfg(sc, data, radio_cfg);
2286 
2287 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2288 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2289 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2290 	data->sku_cap_11n_enable = 0;
2291 
2292 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2293 
2294 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2295 		/* TODO: use IWL_NVM_EXT */
2296 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2297 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2298 				       IWM_NVM_LAR_OFFSET_8000;
2299 
2300 		lar_config = le16_to_cpup(regulatory + lar_offset);
2301 		data->lar_enabled = !!(lar_config &
2302 				       IWM_NVM_LAR_ENABLED_8000);
2303 	}
2304 
2305 	/* If no valid mac address was found - bail out */
2306 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2307 		free(data, M_DEVBUF);
2308 		return NULL;
2309 	}
2310 
2311 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2312 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2313 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2314 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2315 	} else {
2316 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2317 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2318 	}
2319 
2320 	return data;
2321 }
2322 
2323 static void
2324 iwm_free_nvm_data(struct iwm_nvm_data *data)
2325 {
2326 	if (data != NULL)
2327 		free(data, M_DEVBUF);
2328 }
2329 
2330 static struct iwm_nvm_data *
2331 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2332 {
2333 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2334 
2335 	/* Checking for required sections */
2336 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2337 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2338 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2339 			device_printf(sc->sc_dev,
2340 			    "Can't parse empty OTP/NVM sections\n");
2341 			return NULL;
2342 		}
2343 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2344 		/* SW and REGULATORY sections are mandatory */
2345 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2346 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2347 			device_printf(sc->sc_dev,
2348 			    "Can't parse empty OTP/NVM sections\n");
2349 			return NULL;
2350 		}
2351 		/* MAC_OVERRIDE or at least HW section must exist */
2352 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2353 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2354 			device_printf(sc->sc_dev,
2355 			    "Can't parse mac_address, empty sections\n");
2356 			return NULL;
2357 		}
2358 
2359 		/* PHY_SKU section is mandatory in B0 */
2360 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2361 			device_printf(sc->sc_dev,
2362 			    "Can't parse phy_sku in B0, empty sections\n");
2363 			return NULL;
2364 		}
2365 	} else {
2366 		panic("unknown device family %d\n", sc->cfg->device_family);
2367 	}
2368 
2369 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2370 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2371 	calib = (const uint16_t *)
2372 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2373 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2374 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2375 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2376 	mac_override = (const uint16_t *)
2377 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2378 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2379 
2380 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2381 	    phy_sku, regulatory);
2382 }
2383 
2384 static int
2385 iwm_nvm_init(struct iwm_softc *sc)
2386 {
2387 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2388 	int i, ret, section;
2389 	uint32_t size_read = 0;
2390 	uint8_t *nvm_buffer, *temp;
2391 	uint16_t len;
2392 
2393 	memset(nvm_sections, 0, sizeof(nvm_sections));
2394 
2395 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2396 		return EINVAL;
2397 
2398 	/* Load NVM values from the NIC via firmware NVM access commands. */
2400 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2401 
2402 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2403 	if (!nvm_buffer)
2404 		return ENOMEM;
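	/*
	 * Read each section into the scratch buffer and keep a private
	 * copy of the ones that exist.
	 */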
2405 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2406 		/* Read the section; sections that don't exist are skipped. */
2407 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2408 					   &len, size_read);
2409 		if (ret)
2410 			continue;
2411 		size_read += len;
2412 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2413 		if (!temp) {
2414 			ret = ENOMEM;
2415 			break;
2416 		}
2417 		memcpy(temp, nvm_buffer, len);
2418 
2419 		nvm_sections[section].data = temp;
2420 		nvm_sections[section].length = len;
2421 	}
2422 	if (!size_read)
2423 		device_printf(sc->sc_dev, "OTP is blank\n");
2424 	free(nvm_buffer, M_DEVBUF);
2425 
2426 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2427 	if (!sc->nvm_data)
2428 		return EINVAL;
2429 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2430 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2431 
2432 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2433 		if (nvm_sections[i].data != NULL)
2434 			free(nvm_sections[i].data, M_DEVBUF);
2435 	}
2436 
2437 	return 0;
2438 }
2439 
2440 static int
2441 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2442 	const struct iwm_fw_desc *section)
2443 {
2444 	struct iwm_dma_info *dma = &sc->fw_dma;
2445 	uint8_t *v_addr;
2446 	bus_addr_t p_addr;
2447 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2448 	int ret = 0;
2449 
2450 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2451 		    "%s: [%d] uCode section being loaded...\n",
2452 		    __func__, section_num);
2453 
2454 	v_addr = dma->vaddr;
2455 	p_addr = dma->paddr;
2456 
2457 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2458 		uint32_t copy_size, dst_addr;
2459 		int extended_addr = FALSE;
2460 
2461 		copy_size = MIN(chunk_sz, section->len - offset);
2462 		dst_addr = section->offset + offset;
2463 
2464 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2465 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2466 			extended_addr = TRUE;
2467 
2468 		if (extended_addr)
2469 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2470 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2471 
2472 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2473 		    copy_size);
2474 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2475 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2476 						   copy_size);
2477 
2478 		if (extended_addr)
2479 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2480 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2481 
2482 		if (ret) {
2483 			device_printf(sc->sc_dev,
2484 			    "%s: Could not load the [%d] uCode section\n",
2485 			    __func__, section_num);
2486 			break;
2487 		}
2488 	}
2489 
2490 	return ret;
2491 }
2492 
2493 /*
2494  * ucode
2495  */
2496 static int
2497 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2498 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2499 {
2500 	sc->sc_fw_chunk_done = 0;
2501 
2502 	if (!iwm_nic_lock(sc))
2503 		return EBUSY;
2504 
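	/*
	 * Program the FH service channel for a single DMA transfer:
	 * pause the channel, set the SRAM destination and DRAM source
	 * addresses plus the byte count, mark the buffer valid, then
	 * re-enable the channel to kick off the transfer.
	 */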
2505 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2506 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2507 
2508 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2509 	    dst_addr);
2510 
2511 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2512 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2513 
2514 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2515 	    (iwm_get_dma_hi_addr(phy_addr)
2516 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2517 
2518 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2519 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2520 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2521 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2522 
2523 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2524 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2525 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2526 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2527 
2528 	iwm_nic_unlock(sc);
2529 
2530 	/* wait up to 5s for this segment to load */
2531 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2532 
2533 	if (!sc->sc_fw_chunk_done) {
2534 		device_printf(sc->sc_dev,
2535 		    "fw chunk addr 0x%x len %d failed to load\n",
2536 		    dst_addr, byte_cnt);
2537 		return ETIMEDOUT;
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 static int
2544 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2545 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2546 {
2547 	int shift_param;
2548 	int i, ret = 0, sec_num = 0x1;
2549 	uint32_t val, last_read_idx = 0;
2550 
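	/*
	 * CPU1's section-load status bits are reported in the low 16
	 * bits of IWM_FH_UCODE_LOAD_STATUS, CPU2's in the high 16 bits;
	 * shift_param selects the half we update below.
	 */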
2551 	if (cpu == 1) {
2552 		shift_param = 0;
2553 		*first_ucode_section = 0;
2554 	} else {
2555 		shift_param = 16;
2556 		(*first_ucode_section)++;
2557 	}
2558 
2559 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2560 		last_read_idx = i;
2561 
2562 		/*
2563 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2564 		 * CPU1 sections from the CPU2 sections.
2565 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2566 		 * CPU2 non-paged sections from the CPU2 paging sections.
2567 		 */
2568 		if (!image->sec[i].data ||
2569 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2570 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2571 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2572 				    "Break since Data not valid or Empty section, sec = %d\n",
2573 				    i);
2574 			break;
2575 		}
2576 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2577 		if (ret)
2578 			return ret;
2579 
2580 		/* Notify the ucode of the loaded section number and status */
2581 		if (iwm_nic_lock(sc)) {
2582 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2583 			val = val | (sec_num << shift_param);
2584 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2585 			sec_num = (sec_num << 1) | 0x1;
2586 			iwm_nic_unlock(sc);
2587 		}
2588 	}
2589 
2590 	*first_ucode_section = last_read_idx;
2591 
2592 	iwm_enable_interrupts(sc);
2593 
2594 	if (iwm_nic_lock(sc)) {
2595 		if (cpu == 1)
2596 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2597 		else
2598 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2599 		iwm_nic_unlock(sc);
2600 	}
2601 
2602 	return 0;
2603 }
2604 
2605 static int
2606 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2607 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2608 {
2609 	int shift_param;
2610 	int i, ret = 0;
2611 	uint32_t last_read_idx = 0;
2612 
2613 	if (cpu == 1) {
2614 		shift_param = 0;
2615 		*first_ucode_section = 0;
2616 	} else {
2617 		shift_param = 16;
2618 		(*first_ucode_section)++;
2619 	}
2620 
2621 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2622 		last_read_idx = i;
2623 
2624 		/*
2625 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2626 		 * CPU1 sections from the CPU2 sections.
2627 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2628 		 * CPU2 non-paged sections from the CPU2 paging sections.
2629 		 */
2630 		if (!image->sec[i].data ||
2631 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2632 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2633 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2634 				    "Break since Data not valid or Empty section, sec = %d\n",
2635 				     i);
2636 			break;
2637 		}
2638 
2639 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2640 		if (ret)
2641 			return ret;
2642 	}
2643 
2644 	*first_ucode_section = last_read_idx;
2645 
2646 	return 0;
2648 }
2649 
2650 static int
2651 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2652 {
2653 	int ret = 0;
2654 	int first_ucode_section;
2655 
2656 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2657 		     image->is_dual_cpus ? "Dual" : "Single");
2658 
2659 	/* load to FW the binary non secured sections of CPU1 */
2660 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2661 	if (ret)
2662 		return ret;
2663 
2664 	if (image->is_dual_cpus) {
2665 		/* set CPU2 header address */
2666 		if (iwm_nic_lock(sc)) {
2667 			iwm_write_prph(sc,
2668 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2669 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2670 			iwm_nic_unlock(sc);
2671 		}
2672 
2673 		/* load to FW the binary sections of CPU2 */
2674 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2675 						 &first_ucode_section);
2676 		if (ret)
2677 			return ret;
2678 	}
2679 
2680 	iwm_enable_interrupts(sc);
2681 
2682 	/* release CPU reset */
2683 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2684 
2685 	return 0;
2686 }
2687 
2688 int
2689 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2690 	const struct iwm_fw_img *image)
2691 {
2692 	int ret = 0;
2693 	int first_ucode_section;
2694 
2695 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2696 		    image->is_dual_cpus ? "Dual" : "Single");
2697 
2698 	/* configure the ucode to be ready to get the secured image */
2699 	/* release CPU reset */
2700 	if (iwm_nic_lock(sc)) {
2701 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2702 		    IWM_RELEASE_CPU_RESET_BIT);
2703 		iwm_nic_unlock(sc);
2704 	}
2705 
2706 	/* load to FW the binary Secured sections of CPU1 */
2707 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2708 	    &first_ucode_section);
2709 	if (ret)
2710 		return ret;
2711 
2712 	/* load to FW the binary sections of CPU2 */
2713 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2714 	    &first_ucode_section);
2715 }
2716 
2717 /* XXX Get rid of this definition */
2718 static inline void
2719 iwm_enable_fw_load_int(struct iwm_softc *sc)
2720 {
2721 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2722 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2723 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2724 }
2725 
2726 /* XXX Add proper rfkill support code */
2727 static int
2728 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2729 {
2730 	int ret;
2731 
2732 	/* This may fail if AMT took ownership of the device */
2733 	if (iwm_prepare_card_hw(sc)) {
2734 		device_printf(sc->sc_dev,
2735 		    "%s: Exit HW not ready\n", __func__);
2736 		ret = EIO;
2737 		goto out;
2738 	}
2739 
2740 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2741 
2742 	iwm_disable_interrupts(sc);
2743 
2744 	/* make sure rfkill handshake bits are cleared */
2745 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2746 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2747 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2748 
2749 	/* clear (again), then enable host interrupts */
2750 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2751 
2752 	ret = iwm_nic_init(sc);
2753 	if (ret) {
2754 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2755 		goto out;
2756 	}
2757 
2758 	/*
2759 	 * Now, we load the firmware and don't want to be interrupted, even
2760 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2761 	 * FH_TX interrupt which is needed to load the firmware). If the
2762 	 * RF-Kill switch is toggled, we will find out after having loaded
2763 	 * the firmware and return the proper value to the caller.
2764 	 */
2765 	iwm_enable_fw_load_int(sc);
2766 
2767 	/* really make sure rfkill handshake bits are cleared */
2768 	/* maybe we should write a few times more?  just to make sure */
2769 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2770 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2771 
2772 	/* Load the given image to the HW */
2773 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2774 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2775 	else
2776 		ret = iwm_pcie_load_given_ucode(sc, fw);
2777 
2778 	/* XXX re-check RF-Kill state */
2779 
2780 out:
2781 	return ret;
2782 }
2783 
2784 static int
2785 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2786 {
2787 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2788 		.valid = htole32(valid_tx_ant),
2789 	};
2790 
2791 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2792 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2793 }
2794 
2795 /* iwlwifi: mvm/fw.c */
2796 static int
2797 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2798 {
2799 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2800 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2801 
2802 	/* Set parameters */
2803 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2804 	phy_cfg_cmd.calib_control.event_trigger =
2805 	    sc->sc_default_calib[ucode_type].event_trigger;
2806 	phy_cfg_cmd.calib_control.flow_trigger =
2807 	    sc->sc_default_calib[ucode_type].flow_trigger;
2808 
2809 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2810 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2811 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2812 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2813 }
2814 
2815 static int
2816 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2817 {
2818 	struct iwm_alive_data *alive_data = data;
2819 	struct iwm_alive_resp_v3 *palive3;
2820 	struct iwm_alive_resp *palive;
2821 	struct iwm_umac_alive *umac;
2822 	struct iwm_lmac_alive *lmac1;
2823 	struct iwm_lmac_alive *lmac2 = NULL;
2824 	uint16_t status;
2825 
2826 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2827 		palive = (void *)pkt->data;
2828 		umac = &palive->umac_data;
2829 		lmac1 = &palive->lmac_data[0];
2830 		lmac2 = &palive->lmac_data[1];
2831 		status = le16toh(palive->status);
2832 	} else {
2833 		palive3 = (void *)pkt->data;
2834 		umac = &palive3->umac_data;
2835 		lmac1 = &palive3->lmac_data;
2836 		status = le16toh(palive3->status);
2837 	}
2838 
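	/* Record the error/log event table addresses reported by the firmware. */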
2839 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2840 	if (lmac2)
2841 		sc->error_event_table[1] =
2842 			le32toh(lmac2->error_event_table_ptr);
2843 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2844 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2845 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2846 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2847 	if (sc->umac_error_event_table)
2848 		sc->support_umac_log = TRUE;
2849 
2850 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2851 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2852 		    status, lmac1->ver_type, lmac1->ver_subtype);
2853 
2854 	if (lmac2)
2855 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2856 
2857 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2858 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2859 		    le32toh(umac->umac_major),
2860 		    le32toh(umac->umac_minor));
2861 
2862 	return TRUE;
2863 }
2864 
2865 static int
2866 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2867 	struct iwm_rx_packet *pkt, void *data)
2868 {
2869 	struct iwm_phy_db *phy_db = data;
2870 
2871 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2872 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2873 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2874 			    __func__, pkt->hdr.code);
2875 		}
2876 		return TRUE;
2877 	}
2878 
2879 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2880 		device_printf(sc->sc_dev,
2881 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2882 	}
2883 
2884 	return FALSE;
2885 }
2886 
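/*
 * Load the requested ucode image and wait for the firmware's ALIVE
 * notification; on success the scheduler base address from the
 * notification is handed to iwm_trans_pcie_fw_alive().
 */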
2887 static int
2888 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2889 	enum iwm_ucode_type ucode_type)
2890 {
2891 	struct iwm_notification_wait alive_wait;
2892 	struct iwm_alive_data alive_data;
2893 	const struct iwm_fw_img *fw;
2894 	enum iwm_ucode_type old_type = sc->cur_ucode;
2895 	int error;
2896 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2897 
2898 	fw = &sc->sc_fw.img[ucode_type];
2899 	sc->cur_ucode = ucode_type;
2900 	sc->ucode_loaded = FALSE;
2901 
2902 	memset(&alive_data, 0, sizeof(alive_data));
2903 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2904 				   alive_cmd, nitems(alive_cmd),
2905 				   iwm_alive_fn, &alive_data);
2906 
2907 	error = iwm_start_fw(sc, fw);
2908 	if (error) {
2909 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2910 		sc->cur_ucode = old_type;
2911 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2912 		return error;
2913 	}
2914 
2915 	/*
2916 	 * Some things may run in the background now, but we
2917 	 * just wait for the ALIVE notification here.
2918 	 */
2919 	IWM_UNLOCK(sc);
2920 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2921 				      IWM_UCODE_ALIVE_TIMEOUT);
2922 	IWM_LOCK(sc);
2923 	if (error) {
2924 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2925 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2926 			if (iwm_nic_lock(sc)) {
2927 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2928 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2929 				iwm_nic_unlock(sc);
2930 			}
2931 			device_printf(sc->sc_dev,
2932 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2933 			    a, b);
2934 		}
2935 		sc->cur_ucode = old_type;
2936 		return error;
2937 	}
2938 
2939 	if (!alive_data.valid) {
2940 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2941 		    __func__);
2942 		sc->cur_ucode = old_type;
2943 		return EIO;
2944 	}
2945 
2946 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2947 
2948 	/*
2949 	 * Configure and operate the firmware paging mechanism.
2950 	 * The driver configures the paging flow only once; the CPU2
2951 	 * paging image is included in the IWM_UCODE_INIT image.
2952 	 */
2953 	if (fw->paging_mem_size) {
2954 		error = iwm_save_fw_paging(sc, fw);
2955 		if (error) {
2956 			device_printf(sc->sc_dev,
2957 			    "%s: failed to save the FW paging image\n",
2958 			    __func__);
2959 			return error;
2960 		}
2961 
2962 		error = iwm_send_paging_cmd(sc, fw);
2963 		if (error) {
2964 			device_printf(sc->sc_dev,
2965 			    "%s: failed to send the paging cmd\n", __func__);
2966 			iwm_free_fw_paging(sc);
2967 			return error;
2968 		}
2969 	}
2970 
2971 	if (!error)
2972 		sc->ucode_loaded = TRUE;
2973 	return error;
2974 }
2975 
2976 /*
2977  * mvm misc bits
2978  */
2979 
2980 /*
2981  * follows iwlwifi/fw.c
2982  */
2983 static int
2984 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2985 {
2986 	struct iwm_notification_wait calib_wait;
2987 	static const uint16_t init_complete[] = {
2988 		IWM_INIT_COMPLETE_NOTIF,
2989 		IWM_CALIB_RES_NOTIF_PHY_DB
2990 	};
2991 	int ret;
2992 
2993 	/* do not operate with rfkill switch turned on */
2994 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2995 		device_printf(sc->sc_dev,
2996 		    "radio is disabled by hardware switch\n");
2997 		return EPERM;
2998 	}
2999 
3000 	iwm_init_notification_wait(sc->sc_notif_wait,
3001 				   &calib_wait,
3002 				   init_complete,
3003 				   nitems(init_complete),
3004 				   iwm_wait_phy_db_entry,
3005 				   sc->sc_phy_db);
3006 
3007 	/* Will also start the device */
3008 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3009 	if (ret) {
3010 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3011 		    ret);
3012 		goto error;
3013 	}
3014 
3015 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3016 		ret = iwm_send_bt_init_conf(sc);
3017 		if (ret) {
3018 			device_printf(sc->sc_dev,
3019 			    "failed to send bt coex configuration: %d\n", ret);
3020 			goto error;
3021 		}
3022 	}
3023 
3024 	if (justnvm) {
3025 		/* Read nvm */
3026 		ret = iwm_nvm_init(sc);
3027 		if (ret) {
3028 			device_printf(sc->sc_dev, "failed to read nvm\n");
3029 			goto error;
3030 		}
3031 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3032 		goto error;
3033 	}
3034 
3035 	/* Send TX valid antennas before triggering calibrations */
3036 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3037 	if (ret) {
3038 		device_printf(sc->sc_dev,
3039 		    "failed to send antennas before calibration: %d\n", ret);
3040 		goto error;
3041 	}
3042 
3043 	/*
3044 	 * Send the phy configuration command to the init uCode
3045 	 * to start the 16.0 uCode init image's internal calibrations.
3046 	 */
3047 	ret = iwm_send_phy_cfg_cmd(sc);
3048 	if (ret) {
3049 		device_printf(sc->sc_dev,
3050 		    "%s: Failed to run INIT calibrations: %d\n",
3051 		    __func__, ret);
3052 		goto error;
3053 	}
3054 
3055 	/*
3056 	 * Nothing to do but wait for the init complete notification
3057 	 * from the firmware.
3058 	 */
3059 	IWM_UNLOCK(sc);
3060 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3061 	    IWM_UCODE_CALIB_TIMEOUT);
3062 	IWM_LOCK(sc);
3063 
3065 	goto out;
3066 
3067 error:
3068 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3069 out:
3070 	return ret;
3071 }
3072 
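/*
 * Enable PCIe Latency Tolerance Reporting in the firmware, but only
 * if sc_ltr_enabled is set.
 */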
3073 static int
3074 iwm_config_ltr(struct iwm_softc *sc)
3075 {
3076 	struct iwm_ltr_config_cmd cmd = {
3077 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3078 	};
3079 
3080 	if (!sc->sc_ltr_enabled)
3081 		return 0;
3082 
3083 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3084 }
3085 
3086 /*
3087  * receive side
3088  */
3089 
3090 /* (re)stock rx ring, called at init-time and at runtime */
3091 static int
3092 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3093 {
3094 	struct iwm_rx_ring *ring = &sc->rxq;
3095 	struct iwm_rx_data *data = &ring->data[idx];
3096 	struct mbuf *m;
3097 	bus_dmamap_t dmamap;
3098 	bus_dma_segment_t seg;
3099 	int nsegs, error;
3100 
3101 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3102 	if (m == NULL)
3103 		return ENOBUFS;
3104 
3105 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3106 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3107 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3108 	if (error != 0) {
3109 		device_printf(sc->sc_dev,
3110 		    "%s: can't map mbuf, error %d\n", __func__, error);
3111 		m_freem(m);
3112 		return error;
3113 	}
3114 
3115 	if (data->m != NULL)
3116 		bus_dmamap_unload(ring->data_dmat, data->map);
3117 
3118 	/* Swap ring->spare_map with data->map */
3119 	dmamap = data->map;
3120 	data->map = ring->spare_map;
3121 	ring->spare_map = dmamap;
3122 
3123 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3124 	data->m = m;
3125 
3126 	/* Update RX descriptor. */
3127 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
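	/*
	 * MQ-capable NICs take the full 64-bit DMA address; older NICs
	 * take a 32-bit address shifted right by 8 (256-byte aligned).
	 */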
3128 	if (sc->cfg->mqrx_supported)
3129 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3130 	else
3131 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3132 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3133 	    BUS_DMASYNC_PREWRITE);
3134 
3135 	return 0;
3136 }
3137 
3138 static void
3139 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3140 {
3141 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3142 
3143 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3144 
3145 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3146 }
3147 
3148 /*
3149  * Retrieve the average noise (in dBm) among receivers.
3150  */
3151 static int
3152 iwm_get_noise(struct iwm_softc *sc,
3153     const struct iwm_statistics_rx_non_phy *stats)
3154 {
3155 	int i, total, nbant, noise;
3156 
3157 	total = nbant = noise = 0;
3158 	for (i = 0; i < 3; i++) {
3159 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3160 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3161 		    __func__,
3162 		    i,
3163 		    noise);
3164 
3165 		if (noise) {
3166 			total += noise;
3167 			nbant++;
3168 		}
3169 	}
3170 
3171 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3172 	    __func__, nbant, total);
3173 #if 0
3174 	/* There should be at least one antenna but check anyway. */
3175 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3176 #else
3177 	/* For now, just hard-code it to -96 to be safe */
3178 	return (-96);
3179 #endif
3180 }
3181 
3182 static void
3183 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3184 {
3185 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3186 
3187 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3188 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3189 }
3190 
3191 /* iwlwifi: mvm/rx.c */
3192 /*
3193  * iwm_get_signal_strength - use new rx PHY INFO API
3194  * values are reported by the fw as positive values - need to negate
3195  * to obtain their dBm.  Account for missing antennas by replacing 0
3196  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3197  */
3198 static int
3199 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3200     struct iwm_rx_phy_info *phy_info)
3201 {
3202 	int energy_a, energy_b, energy_c, max_energy;
3203 	uint32_t val;
3204 
3205 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3206 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3207 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3208 	energy_a = energy_a ? -energy_a : -256;
3209 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3210 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3211 	energy_b = energy_b ? -energy_b : -256;
3212 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3213 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3214 	energy_c = energy_c ? -energy_c : -256;
3215 	max_energy = MAX(energy_a, energy_b);
3216 	max_energy = MAX(max_energy, energy_c);
3217 
3218 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3219 	    "energy In A %d B %d C %d , and max %d\n",
3220 	    energy_a, energy_b, energy_c, max_energy);
3221 
3222 	return max_energy;
3223 }
3224 
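/*
 * Same idea as above for the MQ RX path: per-antenna energies are
 * reported as positive values; 0 marks a missing antenna and is
 * mapped to -256 dBm.
 */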
3225 static int
3226 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3227     struct iwm_rx_mpdu_desc *desc)
3228 {
3229 	int energy_a, energy_b;
3230 
3231 	energy_a = desc->v1.energy_a;
3232 	energy_b = desc->v1.energy_b;
3233 	energy_a = energy_a ? -energy_a : -256;
3234 	energy_b = energy_b ? -energy_b : -256;
3235 	return MAX(energy_a, energy_b);
3236 }
3237 
3238 /*
3239  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3240  *
3241  * Handles the actual data of the Rx packet from the fw
3242  */
3243 static bool
3244 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3245     bool stolen)
3246 {
3247 	struct ieee80211com *ic = &sc->sc_ic;
3248 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3249 	struct ieee80211_frame *wh;
3250 	struct ieee80211_rx_stats rxs;
3251 	struct iwm_rx_phy_info *phy_info;
3252 	struct iwm_rx_mpdu_res_start *rx_res;
3253 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3254 	uint32_t len;
3255 	uint32_t rx_pkt_status;
3256 	int rssi;
3257 
3258 	phy_info = &sc->sc_last_phy_info;
3259 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3260 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3261 	len = le16toh(rx_res->byte_count);
3262 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3263 
3264 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3265 		device_printf(sc->sc_dev,
3266 		    "dsp size out of range [0,20]: %d\n",
3267 		    phy_info->cfg_phy_cnt);
3268 		return false;
3269 	}
3270 
3271 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3272 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3273 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3274 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3275 		return false;
3276 	}
3277 
3278 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3279 
3280 	/* Map it to relative value */
3281 	rssi = rssi - sc->sc_noise;
3282 
3283 	/* replenish ring for the buffer we're going to feed to the sharks */
3284 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3285 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3286 		    __func__);
3287 		return false;
3288 	}
3289 
3290 	m->m_data = pkt->data + sizeof(*rx_res);
3291 	m->m_pkthdr.len = m->m_len = len;
3292 
3293 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3294 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3295 
3296 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3297 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3298 	    __func__,
3299 	    le16toh(phy_info->channel),
3300 	    le16toh(phy_info->phy_flags));
3301 
3302 	/*
3303 	 * Populate an RX state struct with the provided information.
3304 	 */
3305 	bzero(&rxs, sizeof(rxs));
3306 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3307 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3308 	rxs.c_ieee = le16toh(phy_info->channel);
3309 	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3310 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3311 	} else {
3312 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3313 	}
3314 
3315 	/* rssi is in 1/2db units */
3316 	rxs.c_rssi = rssi * 2;
3317 	rxs.c_nf = sc->sc_noise;
3318 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3319 		return false;
3320 
3321 	if (ieee80211_radiotap_active_vap(vap)) {
3322 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3323 
3324 		tap->wr_flags = 0;
3325 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3326 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3327 		tap->wr_chan_freq = htole16(rxs.c_freq);
3328 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3329 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3330 		tap->wr_dbm_antsignal = (int8_t)rssi;
3331 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3332 		tap->wr_tsft = phy_info->system_timestamp;
3333 		switch (phy_info->rate) {
3334 		/* CCK rates. */
3335 		case  10: tap->wr_rate =   2; break;
3336 		case  20: tap->wr_rate =   4; break;
3337 		case  55: tap->wr_rate =  11; break;
3338 		case 110: tap->wr_rate =  22; break;
3339 		/* OFDM rates. */
3340 		case 0xd: tap->wr_rate =  12; break;
3341 		case 0xf: tap->wr_rate =  18; break;
3342 		case 0x5: tap->wr_rate =  24; break;
3343 		case 0x7: tap->wr_rate =  36; break;
3344 		case 0x9: tap->wr_rate =  48; break;
3345 		case 0xb: tap->wr_rate =  72; break;
3346 		case 0x1: tap->wr_rate =  96; break;
3347 		case 0x3: tap->wr_rate = 108; break;
3348 		/* Unknown rate: should not happen. */
3349 		default:  tap->wr_rate =   0;
3350 		}
3351 	}
3352 
3353 	return true;
3354 }
3355 
3356 static bool
3357 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3358     bool stolen)
3359 {
3360 	struct ieee80211com *ic = &sc->sc_ic;
3361 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3362 	struct ieee80211_frame *wh;
3363 	struct ieee80211_rx_stats rxs;
3364 	struct iwm_rx_mpdu_desc *desc;
3365 	struct iwm_rx_packet *pkt;
3366 	int rssi;
3367 	uint32_t hdrlen, len, rate_n_flags;
3368 	uint16_t phy_info;
3369 	uint8_t channel;
3370 
3371 	pkt = mtodo(m, offset);
3372 	desc = (void *)pkt->data;
3373 
3374 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3375 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3376 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3377 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3378 		return false;
3379 	}
3380 
3381 	channel = desc->v1.channel;
3382 	len = le16toh(desc->mpdu_len);
3383 	phy_info = le16toh(desc->phy_info);
3384 	rate_n_flags = desc->v1.rate_n_flags;
3385 
3386 	wh = mtodo(m, sizeof(*desc));
3387 	m->m_data = pkt->data + sizeof(*desc);
3388 	m->m_pkthdr.len = m->m_len = len;
3390 
3391 	/* Account for padding following the frame header. */
3392 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3393 		hdrlen = ieee80211_anyhdrsize(wh);
3394 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3395 		m->m_data = mtodo(m, 2);
3396 		wh = mtod(m, struct ieee80211_frame *);
3397 	}
3398 
3399 	/* Map it to relative value */
3400 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3401 	rssi = rssi - sc->sc_noise;
3402 
3403 	/* replenish ring for the buffer we're going to feed to the sharks */
3404 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3405 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3406 		    __func__);
3407 		return false;
3408 	}
3409 
3410 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3411 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3412 
3413 	/*
3414 	 * Populate an RX state struct with the provided information.
3415 	 */
3416 	bzero(&rxs, sizeof(rxs));
3417 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3418 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3419 	rxs.c_ieee = channel;
3420 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3421 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3422 
3423 	/* rssi is in 1/2 dB units */
3424 	rxs.c_rssi = rssi * 2;
3425 	rxs.c_nf = sc->sc_noise;
3426 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3427 		return false;
3428 
3429 	if (ieee80211_radiotap_active_vap(vap)) {
3430 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3431 
3432 		tap->wr_flags = 0;
3433 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3434 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3435 		tap->wr_chan_freq = htole16(rxs.c_freq);
3436 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3437 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3438 		tap->wr_dbm_antsignal = (int8_t)rssi;
3439 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3440 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3441 		switch (rate_n_flags & 0xff) {
3442 		/* CCK rates. */
3443 		case  10: tap->wr_rate =   2; break;
3444 		case  20: tap->wr_rate =   4; break;
3445 		case  55: tap->wr_rate =  11; break;
3446 		case 110: tap->wr_rate =  22; break;
3447 		/* OFDM rates. */
3448 		case 0xd: tap->wr_rate =  12; break;
3449 		case 0xf: tap->wr_rate =  18; break;
3450 		case 0x5: tap->wr_rate =  24; break;
3451 		case 0x7: tap->wr_rate =  36; break;
3452 		case 0x9: tap->wr_rate =  48; break;
3453 		case 0xb: tap->wr_rate =  72; break;
3454 		case 0x1: tap->wr_rate =  96; break;
3455 		case 0x3: tap->wr_rate = 108; break;
3456 		/* Unknown rate: should not happen. */
3457 		default:  tap->wr_rate =   0;
3458 		}
3459 	}
3460 
3461 	return true;
3462 }
3463 
3464 static bool
3465 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3466     bool stolen)
3467 {
3468 	struct ieee80211com *ic;
3469 	struct ieee80211_frame *wh;
3470 	struct ieee80211_node *ni;
3471 	bool ret;
3472 
3473 	ic = &sc->sc_ic;
3474 
3475 	ret = sc->cfg->mqrx_supported ?
3476 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3477 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3478 	if (!ret) {
3479 		counter_u64_add(ic->ic_ierrors, 1);
3480 		return (ret);
3481 	}
3482 
3483 	wh = mtod(m, struct ieee80211_frame *);
3484 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3485 
3486 	IWM_UNLOCK(sc);
3487 	if (ni != NULL) {
3488 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3489 		ieee80211_input_mimo(ni, m);
3490 		ieee80211_free_node(ni);
3491 	} else {
3492 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3493 		ieee80211_input_mimo_all(ic, m);
3494 	}
3495 	IWM_LOCK(sc);
3496 
3497 	return true;
3498 }
3499 
3500 static int
3501 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3502 	struct iwm_node *in)
3503 {
3504 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3505 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3506 	struct ieee80211_node *ni = &in->in_ni;
3507 	struct ieee80211vap *vap = ni->ni_vap;
3508 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3509 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3510 	boolean_t rate_matched;
3511 	uint8_t tx_resp_rate;
3512 
3513 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3514 
3515 	/* Update rate control statistics. */
3516 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3517 	    __func__,
3518 	    (int) le16toh(tx_resp->status.status),
3519 	    (int) le16toh(tx_resp->status.sequence),
3520 	    tx_resp->frame_count,
3521 	    tx_resp->bt_kill_count,
3522 	    tx_resp->failure_rts,
3523 	    tx_resp->failure_frame,
3524 	    le32toh(tx_resp->initial_rate),
3525 	    (int) le16toh(tx_resp->wireless_media_time));
3526 
3527 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3528 
3529 	/* For rate control, ignore frames sent at different initial rate */
3530 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3531 
3532 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3533 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3534 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3535 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3536 	}
3537 
3538 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3539 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3540 	txs->short_retries = tx_resp->failure_rts;
3541 	txs->long_retries = tx_resp->failure_frame;
3542 	if (status != IWM_TX_STATUS_SUCCESS &&
3543 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3544 		switch (status) {
3545 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3546 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3547 			break;
3548 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3549 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3550 			break;
3551 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3552 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3553 			break;
3554 		default:
3555 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3556 			break;
3557 		}
3558 	} else {
3559 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3560 	}
3561 
3562 	if (rate_matched) {
3563 		ieee80211_ratectl_tx_complete(ni, txs);
3564 
3565 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3566 		new_rate = vap->iv_bss->ni_txrate;
3567 		if (new_rate != 0 && new_rate != cur_rate) {
3568 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3569 			iwm_setrates(sc, in, rix);
3570 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3571 		}
3572 	}
3573 
3574 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3575 }
3576 
3577 static void
3578 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3579 {
3580 	struct iwm_cmd_header *cmd_hdr;
3581 	struct iwm_tx_ring *ring;
3582 	struct iwm_tx_data *txd;
3583 	struct iwm_node *in;
3584 	struct mbuf *m;
3585 	int idx, qid, qmsk, status;
3586 
3587 	cmd_hdr = &pkt->hdr;
3588 	idx = cmd_hdr->idx;
3589 	qid = cmd_hdr->qid;
3590 
3591 	ring = &sc->txq[qid];
3592 	txd = &ring->data[idx];
3593 	in = txd->in;
3594 	m = txd->m;
3595 
3596 	KASSERT(txd->done == 0, ("txd not done"));
3597 	KASSERT(txd->in != NULL, ("txd without node"));
3598 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3599 
3600 	sc->sc_tx_timer = 0;
3601 
3602 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3603 
3604 	/* Unmap and free mbuf. */
3605 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3606 	bus_dmamap_unload(ring->data_dmat, txd->map);
3607 
3608 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3609 	    "free txd %p, in %p\n", txd, txd->in);
3610 	txd->done = 1;
3611 	txd->m = NULL;
3612 	txd->in = NULL;
3613 
3614 	ieee80211_tx_complete(&in->in_ni, m, status);
3615 
3616 	qmsk = 1 << qid;
3617 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3618 		sc->qfullmsk &= ~qmsk;
3619 		if (sc->qfullmsk == 0)
3620 			iwm_start(sc);
3621 	}
3622 }
3623 
3624 /*
3625  * transmit side
3626  */
3627 
3628 /*
3629  * Process a "command done" firmware notification.  This is where we wake up
3630  * processes waiting for a synchronous command completion.
3631  * (Adapted from if_iwn.)
3632  */
3633 static void
3634 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3635 {
3636 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3637 	struct iwm_tx_data *data;
3638 
3639 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3640 		return;	/* Not a command ack. */
3641 	}
3642 
3643 	/* XXX wide commands? */
3644 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3645 	    "cmd notification type 0x%x qid %d idx %d\n",
3646 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3647 
3648 	data = &ring->data[pkt->hdr.idx];
3649 
3650 	/* If the command was mapped in an mbuf, free it. */
3651 	if (data->m != NULL) {
3652 		bus_dmamap_sync(ring->data_dmat, data->map,
3653 		    BUS_DMASYNC_POSTWRITE);
3654 		bus_dmamap_unload(ring->data_dmat, data->map);
3655 		m_freem(data->m);
3656 		data->m = NULL;
3657 	}
3658 	wakeup(&ring->desc[pkt->hdr.idx]);
3659 
3660 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3661 		device_printf(sc->sc_dev,
3662 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3663 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3664 		/* XXX call iwm_force_nmi() */
3665 	}
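	/*
	 * Worked example for the consistency check above (ring size assumed
	 * to be 256): if idx 10 completes while 2 commands are still queued,
	 * ring->cur should be (10 + 2) % 256 = 12; anything else means a
	 * host command was skipped.
	 */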
3666 
3667 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3668 	ring->queued--;
3669 	if (ring->queued == 0)
3670 		iwm_pcie_clear_cmd_in_flight(sc);
3671 }
3672 
3673 #if 0
3674 /*
3675  * necessary only for block ack mode
3676  */
3677 void
3678 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3679 	uint16_t len)
3680 {
3681 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3682 	uint16_t w_val;
3683 
3684 	scd_bc_tbl = sc->sched_dma.vaddr;
3685 
3686 	len += 8; /* magic numbers came naturally from paris */
3687 	len = roundup(len, 4) / 4;
3688 
3689 	w_val = htole16(sta_id << 12 | len);
3690 
3691 	/* Update TX scheduler. */
3692 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3693 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3694 	    BUS_DMASYNC_PREWRITE);
3695 
3696 	/* Mirror the entry into the wrap-around duplicate region of the table. */
3697 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3698 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3699 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3700 		    BUS_DMASYNC_PREWRITE);
3701 	}
3702 }
3703 #endif
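/*
 * Worked example for the byte-count encoding above (values chosen for
 * illustration): a 100-byte frame yields len = roundup(100 + 8, 4) / 4
 * = 27 dwords, and with sta_id 0 the table entry becomes
 * htole16(0 << 12 | 27).
 */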
3704 
3705 static int
3706 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3707 {
3708 	int i;
3709 
3710 	for (i = 0; i < nitems(iwm_rates); i++) {
3711 		if (iwm_rates[i].rate == rate)
3712 			return (i);
3713 	}
3714 	/* XXX error? */
3715 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3716 	    "%s: couldn't find an entry for rate=%d\n",
3717 	    __func__,
3718 	    rate);
3719 	return (0);
3720 }
3721 
3722 /*
3723  * Fill in the rate related information for a transmit command.
3724  */
3725 static const struct iwm_rate *
3726 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3727 	struct mbuf *m, struct iwm_tx_cmd *tx)
3728 {
3729 	struct ieee80211_node *ni = &in->in_ni;
3730 	struct ieee80211_frame *wh;
3731 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3732 	const struct iwm_rate *rinfo;
3733 	int type;
3734 	int ridx, rate_flags;
3735 
3736 	wh = mtod(m, struct ieee80211_frame *);
3737 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3738 
3739 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3740 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3741 
3742 	if (type == IEEE80211_FC0_TYPE_MGT ||
3743 	    type == IEEE80211_FC0_TYPE_CTL ||
3744 	    (m->m_flags & M_EAPOL) != 0) {
3745 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3746 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3747 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3748 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3749 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3750 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3751 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3752 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3753 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3754 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3755 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3756 	} else {
3757 		/* for data frames, use RS table */
3758 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3759 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3760 		if (ridx == -1)
3761 			ridx = 0;
3762 
3763 		/* This is the index into the programmed table */
3764 		tx->initial_rate_index = 0;
3765 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3766 	}
3767 
3768 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3769 	    "%s: frame type=%d txrate %d\n",
3770 	        __func__, type, iwm_rates[ridx].rate);
3771 
3772 	rinfo = &iwm_rates[ridx];
3773 
3774 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3775 	    __func__, ridx,
3776 	    rinfo->rate,
3777 	    !! (IWM_RIDX_IS_CCK(ridx))
3778 	    );
3779 
3780 	/* XXX TODO: hard-coded TX antenna? */
3781 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3782 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3783 	else
3784 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3785 	if (IWM_RIDX_IS_CCK(ridx))
3786 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3787 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3788 
3789 	return rinfo;
3790 }
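/*
 * A sketch (not compiled) of the rate_n_flags composition performed above,
 * for the lowest CCK rate on antenna A; index 0 of iwm_rates[] is assumed
 * to be that rate, as the lookup fallback above suggests:
 */
#if 0
	uint32_t rnf = IWM_RATE_MCS_ANT_A_MSK | IWM_RATE_MCS_CCK_MSK |
	    iwm_rates[0].plcp;
	tx->rate_n_flags = htole32(rnf);
#endif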
3791 
3792 #define TB0_SIZE 16
3793 static int
3794 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3795 {
3796 	struct ieee80211com *ic = &sc->sc_ic;
3797 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3798 	struct iwm_node *in = IWM_NODE(ni);
3799 	struct iwm_tx_ring *ring;
3800 	struct iwm_tx_data *data;
3801 	struct iwm_tfd *desc;
3802 	struct iwm_device_cmd *cmd;
3803 	struct iwm_tx_cmd *tx;
3804 	struct ieee80211_frame *wh;
3805 	struct ieee80211_key *k = NULL;
3806 	struct mbuf *m1;
3807 	const struct iwm_rate *rinfo;
3808 	uint32_t flags;
3809 	u_int hdrlen;
3810 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3811 	int nsegs;
3812 	uint8_t tid, type;
3813 	int i, totlen, error, pad;
3814 
3815 	wh = mtod(m, struct ieee80211_frame *);
3816 	hdrlen = ieee80211_anyhdrsize(wh);
3817 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3818 	tid = 0;
3819 	ring = &sc->txq[ac];
3820 	desc = &ring->desc[ring->cur];
3821 	data = &ring->data[ring->cur];
3822 
3823 	/* Fill out iwm_tx_cmd to send to the firmware */
3824 	cmd = &ring->cmd[ring->cur];
3825 	cmd->hdr.code = IWM_TX_CMD;
3826 	cmd->hdr.flags = 0;
3827 	cmd->hdr.qid = ring->qid;
3828 	cmd->hdr.idx = ring->cur;
3829 
3830 	tx = (void *)cmd->data;
3831 	memset(tx, 0, sizeof(*tx));
3832 
3833 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3834 
3835 	/* Encrypt the frame if need be. */
3836 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3837 		/* Retrieve key for TX && do software encryption. */
3838 		k = ieee80211_crypto_encap(ni, m);
3839 		if (k == NULL) {
3840 			m_freem(m);
3841 			return (ENOBUFS);
3842 		}
3843 		/* 802.11 header may have moved. */
3844 		wh = mtod(m, struct ieee80211_frame *);
3845 	}
3846 
3847 	if (ieee80211_radiotap_active_vap(vap)) {
3848 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3849 
3850 		tap->wt_flags = 0;
3851 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3852 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3853 		tap->wt_rate = rinfo->rate;
3854 		if (k != NULL)
3855 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3856 		ieee80211_radiotap_tx(vap, m);
3857 	}
3858 
3859 	flags = 0;
3860 	totlen = m->m_pkthdr.len;
3861 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3862 		flags |= IWM_TX_CMD_FLG_ACK;
3863 	}
3864 
3865 	if (type == IEEE80211_FC0_TYPE_DATA &&
3866 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3867 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3868 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3869 	}
3870 
3871 	tx->sta_id = IWM_STATION_ID;
3872 
3873 	if (type == IEEE80211_FC0_TYPE_MGT) {
3874 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3875 
3876 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3877 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3878 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3879 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3880 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3881 		} else {
3882 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3883 		}
3884 	} else {
3885 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3886 	}
3887 
3888 	if (hdrlen & 3) {
3889 		/* First segment length must be a multiple of 4. */
3890 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3891 		tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
3892 		pad = 4 - (hdrlen & 3);
3893 	} else {
3894 		tx->offload_assist = 0;
3895 		pad = 0;
3896 	}
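	/*
	 * Worked example: a 26-byte QoS data header gives
	 * pad = 4 - (26 & 3) = 2, i.e. two bytes of padding after the
	 * MAC header restore the 4-byte alignment of what follows.
	 */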
3897 
3898 	tx->len = htole16(totlen);
3899 	tx->tid_tspec = tid;
3900 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3901 
3902 	/* Set physical address of "scratch area". */
3903 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3904 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3905 
3906 	/* Copy 802.11 header in TX command. */
3907 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3908 
3909 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3910 
3911 	tx->sec_ctl = 0;
3912 	tx->tx_flags |= htole32(flags);
3913 
3914 	/* Trim 802.11 header. */
3915 	m_adj(m, hdrlen);
3916 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3917 	    segs, &nsegs, BUS_DMA_NOWAIT);
3918 	if (error != 0) {
3919 		if (error != EFBIG) {
3920 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3921 			    error);
3922 			m_freem(m);
3923 			return error;
3924 		}
3925 		/* Too many DMA segments, linearize mbuf. */
3926 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3927 		if (m1 == NULL) {
3928 			device_printf(sc->sc_dev,
3929 			    "%s: could not defrag mbuf\n", __func__);
3930 			m_freem(m);
3931 			return (ENOBUFS);
3932 		}
3933 		m = m1;
3934 
3935 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3936 		    segs, &nsegs, BUS_DMA_NOWAIT);
3937 		if (error != 0) {
3938 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3939 			    error);
3940 			m_freem(m);
3941 			return error;
3942 		}
3943 	}
3944 	data->m = m;
3945 	data->in = in;
3946 	data->done = 0;
3947 
3948 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3949 	    "sending txd %p, in %p\n", data, data->in);
3950 	KASSERT(data->in != NULL, ("node is NULL"));
3951 
3952 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3953 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3954 	    ring->qid, ring->cur, totlen, nsegs,
3955 	    le32toh(tx->tx_flags),
3956 	    le32toh(tx->rate_n_flags),
3957 	    tx->initial_rate_index
3958 	    );
3959 
3960 	/* Fill TX descriptor. */
3961 	memset(desc, 0, sizeof(*desc));
3962 	desc->num_tbs = 2 + nsegs;
3963 
3964 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3965 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3966 	    (TB0_SIZE << 4));
3967 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3968 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3969 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3970 	    hdrlen + pad - TB0_SIZE) << 4));
3971 
3972 	/* Other DMA segments are for data payload. */
3973 	for (i = 0; i < nsegs; i++) {
3974 		seg = &segs[i];
3975 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3976 		desc->tbs[i + 2].hi_n_len =
3977 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
3978 		    (seg->ds_len << 4));
3979 	}
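	/*
	 * Resulting TB layout, as built above: TB0 maps the first
	 * TB0_SIZE (16) bytes of the command, TB1 maps the rest of the
	 * TX command plus the copied 802.11 header and pad, and
	 * TB2..TB(nsegs+1) map the payload segments of the mbuf.
	 */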
3980 
3981 	bus_dmamap_sync(ring->data_dmat, data->map,
3982 	    BUS_DMASYNC_PREWRITE);
3983 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3984 	    BUS_DMASYNC_PREWRITE);
3985 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3986 	    BUS_DMASYNC_PREWRITE);
3987 
3988 #if 0
3989 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3990 #endif
3991 
3992 	/* Kick TX ring. */
3993 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3994 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3995 
3996 	/* Mark TX ring as full if we reach a certain threshold. */
3997 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3998 		sc->qfullmsk |= 1 << ring->qid;
3999 	}
4000 
4001 	return 0;
4002 }
4003 
4004 static int
4005 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4006     const struct ieee80211_bpf_params *params)
4007 {
4008 	struct ieee80211com *ic = ni->ni_ic;
4009 	struct iwm_softc *sc = ic->ic_softc;
4010 	int error = 0;
4011 
4012 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4013 	    "->%s begin\n", __func__);
4014 
4015 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4016 		m_freem(m);
4017 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4018 		    "<-%s not RUNNING\n", __func__);
4019 		return (ENETDOWN);
4020 	}
4021 
4022 	IWM_LOCK(sc);
4023 	/* XXX fix this: the bpf params are currently ignored. */
4024 	error = iwm_tx(sc, m, ni, 0);
4029 	if (sc->sc_tx_timer == 0)
4030 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4031 	sc->sc_tx_timer = 5;
4032 	IWM_UNLOCK(sc);
4033 
4034 	return (error);
4035 }
4036 
4037 /*
4038  * mvm/tx.c
4039  */
4040 
4041 /*
4042  * Note that there are transports that buffer frames before they reach
4043  * the firmware. This means that after flush_tx_path is called, the
4044  * queue might not be empty. The race-free way to handle this is to:
4045  * 1) set the station as draining
4046  * 2) flush the Tx path
4047  * 3) wait for the transport queues to be empty
4048  */
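/*
 * A sketch (not compiled; helper names other than iwm_flush_tx_path are
 * hypothetical) of the race-free drain sequence described above:
 */
#if 0
	iwm_drain_sta(sc, ivp, TRUE);			/* 1) mark draining */
	iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);	/* 2) flush Tx path */
	iwm_wait_tx_queues_empty(sc, 0xf);		/* 3) wait until empty */
#endif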
4049 int
4050 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4051 {
4052 	int ret;
4053 	struct iwm_tx_path_flush_cmd flush_cmd = {
4054 		.queues_ctl = htole32(tfd_msk),
4055 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4056 	};
4057 
4058 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4059 	    sizeof(flush_cmd), &flush_cmd);
4060 	if (ret)
4061 		device_printf(sc->sc_dev,
4062 		    "Flushing tx queue failed: %d\n", ret);
4063 	return ret;
4064 }
4065 
4066 /*
4067  * BEGIN mvm/quota.c
4068  */
4069 
4070 static int
4071 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4072 {
4073 	struct iwm_time_quota_cmd cmd;
4074 	int i, idx, ret, num_active_macs, quota, quota_rem;
4075 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4076 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4077 	uint16_t id;
4078 
4079 	memset(&cmd, 0, sizeof(cmd));
4080 
4081 	/* currently, PHY ID == binding ID */
4082 	if (ivp) {
4083 		id = ivp->phy_ctxt->id;
4084 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4085 		colors[id] = ivp->phy_ctxt->color;
4086 
4087 		n_ifs[id] = 1;
4089 	}
4090 
4091 	/*
4092 	 * The FW's scheduling session consists of
4093 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4094 	 * equally between all the bindings that require quota
4095 	 */
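	/*
	 * Worked example (IWM_MAX_QUOTA value assumed for illustration):
	 * with IWM_MAX_QUOTA = 128 and three active bindings, each
	 * binding gets 128 / 3 = 42 fragments and the remainder of 2 is
	 * credited to the first binding further below.
	 */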
4096 	num_active_macs = 0;
4097 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4098 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4099 		num_active_macs += n_ifs[i];
4100 	}
4101 
4102 	quota = 0;
4103 	quota_rem = 0;
4104 	if (num_active_macs) {
4105 		quota = IWM_MAX_QUOTA / num_active_macs;
4106 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4107 	}
4108 
4109 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4110 		if (colors[i] < 0)
4111 			continue;
4112 
4113 		cmd.quotas[idx].id_and_color =
4114 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4115 
4116 		if (n_ifs[i] <= 0) {
4117 			cmd.quotas[idx].quota = htole32(0);
4118 			cmd.quotas[idx].max_duration = htole32(0);
4119 		} else {
4120 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4121 			cmd.quotas[idx].max_duration = htole32(0);
4122 		}
4123 		idx++;
4124 	}
4125 
4126 	/* Give the remainder of the session to the first binding */
4127 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4128 
4129 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4130 	    sizeof(cmd), &cmd);
4131 	if (ret)
4132 		device_printf(sc->sc_dev,
4133 		    "%s: Failed to send quota: %d\n", __func__, ret);
4134 	return ret;
4135 }
4136 
4137 /*
4138  * END mvm/quota.c
4139  */
4140 
4141 /*
4142  * ieee80211 routines
4143  */
4144 
4145 /*
4146  * Change to AUTH state in 80211 state machine.  Roughly matches what
4147  * Linux does in bss_info_changed().
4148  */
4149 static int
4150 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4151 {
4152 	struct ieee80211_node *ni;
4153 	struct iwm_node *in;
4154 	struct iwm_vap *iv = IWM_VAP(vap);
4155 	uint32_t duration;
4156 	int error;
4157 
4158 	/*
4159 	 * XXX i have a feeling that the vap node is being
4160 	 * freed from underneath us. Grr.
4161 	 */
4162 	ni = ieee80211_ref_node(vap->iv_bss);
4163 	in = IWM_NODE(ni);
4164 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4165 	    "%s: called; vap=%p, bss ni=%p\n",
4166 	    __func__,
4167 	    vap,
4168 	    ni);
4169 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4170 	    __func__, ether_sprintf(ni->ni_bssid));
4171 
4172 	in->in_assoc = 0;
4173 	iv->iv_auth = 1;
4174 
4175 	/*
4176 	 * Firmware bug - it'll crash if the beacon interval is less
4177 	 * than 16.  We can't work around it, so refuse the
4178 	 * station state change; this causes net80211 to abandon
4179 	 * attempts to connect to this AP, and eventually wpa_s will
4180 	 * blacklist the AP...
4181 	 */
4182 	if (ni->ni_intval < 16) {
4183 		device_printf(sc->sc_dev,
4184 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4185 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4186 		error = EINVAL;
4187 		goto out;
4188 	}
4189 
4190 	error = iwm_allow_mcast(vap, sc);
4191 	if (error) {
4192 		device_printf(sc->sc_dev,
4193 		    "%s: failed to set multicast\n", __func__);
4194 		goto out;
4195 	}
4196 
4197 	/*
4198 	 * This is where it deviates from what Linux does.
4199 	 *
4200 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4201 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4202 	 * and always does a mac_ctx_changed().
4203 	 *
4204 	 * The openbsd port doesn't attempt to do that - it resets things
4205 	 * at odd states and does the add here.
4206 	 *
4207 	 * So, until the state handling is fixed (ie, we never reset
4208 	 * the NIC except for a firmware failure, which should drag
4209 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4210 	 * contexts that are required), let's do a dirty hack here.
4211 	 */
4212 	if (iv->is_uploaded) {
4213 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4214 			device_printf(sc->sc_dev,
4215 			    "%s: failed to update MAC\n", __func__);
4216 			goto out;
4217 		}
4218 	} else {
4219 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4220 			device_printf(sc->sc_dev,
4221 			    "%s: failed to add MAC\n", __func__);
4222 			goto out;
4223 		}
4224 	}
4225 	sc->sc_firmware_state = 1;
4226 
4227 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4228 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4229 		device_printf(sc->sc_dev,
4230 		    "%s: failed update phy ctxt\n", __func__);
4231 		goto out;
4232 	}
4233 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4234 
4235 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4236 		device_printf(sc->sc_dev,
4237 		    "%s: binding update cmd\n", __func__);
4238 		goto out;
4239 	}
4240 	sc->sc_firmware_state = 2;
4241 	/*
4242 	 * Authentication becomes unreliable when powersaving is left enabled
4243 	 * here. Powersaving will be activated again when association has
4244 	 * finished or is aborted.
4245 	 */
4246 	iv->ps_disabled = TRUE;
4247 	error = iwm_power_update_mac(sc);
4248 	iv->ps_disabled = FALSE;
4249 	if (error != 0) {
4250 		device_printf(sc->sc_dev,
4251 		    "%s: failed to update power management\n",
4252 		    __func__);
4253 		goto out;
4254 	}
4255 	if ((error = iwm_add_sta(sc, in)) != 0) {
4256 		device_printf(sc->sc_dev,
4257 		    "%s: failed to add sta\n", __func__);
4258 		goto out;
4259 	}
4260 	sc->sc_firmware_state = 3;
4261 
4262 	/*
4263 	 * Prevent the FW from wandering off channel during association
4264 	 * by "protecting" the session with a time event.
4265 	 */
4266 	/* XXX duration is in units of TU, not MS */
4267 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4268 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4269 
4270 	error = 0;
4271 out:
4272 	if (error != 0)
4273 		iv->iv_auth = 0;
4274 	ieee80211_free_node(ni);
4275 	return (error);
4276 }
4277 
4278 static struct ieee80211_node *
4279 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4280 {
4281 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4282 	    M_NOWAIT | M_ZERO);
4283 }
4284 
4285 static uint8_t
4286 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4287 {
4288 	uint8_t plcp = rate_n_flags & 0xff;
4289 	int i;
4290 
4291 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4292 		if (iwm_rates[i].plcp == plcp)
4293 			return iwm_rates[i].rate;
4294 	}
4295 	return 0;
4296 }
4297 
4298 uint8_t
4299 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4300 {
4301 	int i;
4302 	uint8_t rval;
4303 
4304 	for (i = 0; i < rs->rs_nrates; i++) {
4305 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4306 		if (rval == iwm_rates[ridx].rate)
4307 			return rs->rs_rates[i];
4308 	}
4309 
4310 	return 0;
4311 }
4312 
4313 static int
4314 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4315 {
4316 	int i;
4317 
4318 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4319 		if (iwm_rates[i].rate == rate)
4320 			return i;
4321 	}
4322 
4323 	device_printf(sc->sc_dev,
4324 	    "%s: WARNING: device rate for %u not found!\n",
4325 	    __func__, rate);
4326 
4327 	return -1;
4328 }
4329 
4330 
4331 static void
4332 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4333 {
4334 	struct ieee80211_node *ni = &in->in_ni;
4335 	struct iwm_lq_cmd *lq = &in->in_lq;
4336 	struct ieee80211_rateset *rs = &ni->ni_rates;
4337 	int nrates = rs->rs_nrates;
4338 	int i, ridx, tab = 0;
4339 //	int txant = 0;
4340 
4341 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4342 
4343 	if (nrates > nitems(lq->rs_table)) {
4344 		device_printf(sc->sc_dev,
4345 		    "%s: node supports %d rates, driver handles "
4346 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4347 		return;
4348 	}
4349 	if (nrates == 0) {
4350 		device_printf(sc->sc_dev,
4351 		    "%s: node supports 0 rates, odd!\n", __func__);
4352 		return;
4353 	}
4354 	nrates = imin(rix + 1, nrates);
4355 
4356 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4357 	    "%s: nrates=%d\n", __func__, nrates);
4358 
4359 	/* then construct a lq_cmd based on those */
4360 	memset(lq, 0, sizeof(*lq));
4361 	lq->sta_id = IWM_STATION_ID;
4362 
4363 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4364 	if (ni->ni_flags & IEEE80211_NODE_HT)
4365 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4366 
4367 	/*
4368 	 * are these used? (we don't do SISO or MIMO)
4369 	 * need to set them to non-zero, though, or we get an error.
4370 	 */
4371 	lq->single_stream_ant_msk = 1;
4372 	lq->dual_stream_ant_msk = 1;
4373 
4374 	/*
4375 	 * Build the actual rate selection table.
4376 	 * The lowest bits are the rates.  Additionally,
4377 	 * CCK needs bit 9 to be set.  The rest of the bits
4378 	 * we add to the table select the tx antenna.  Note that
4379 	 * the rates are added highest-first (the opposite order
4380 	 * of ni_rates).
4381 	 */
4382 	for (i = 0; i < nrates; i++) {
4383 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4384 		int nextant;
4385 
4386 		/* Map 802.11 rate to HW rate index. */
4387 		ridx = iwm_rate2ridx(sc, rate);
4388 		if (ridx == -1)
4389 			continue;
4390 
4391 #if 0
4392 		if (txant == 0)
4393 			txant = iwm_get_valid_tx_ant(sc);
4394 		nextant = 1<<(ffs(txant)-1);
4395 		txant &= ~nextant;
4396 #else
4397 		nextant = iwm_get_valid_tx_ant(sc);
4398 #endif
4399 		tab = iwm_rates[ridx].plcp;
4400 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4401 		if (IWM_RIDX_IS_CCK(ridx))
4402 			tab |= IWM_RATE_MCS_CCK_MSK;
4403 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4404 		    "station rate i=%d, rate=%d, hw=%x\n",
4405 		    i, iwm_rates[ridx].rate, tab);
4406 		lq->rs_table[i] = htole32(tab);
4407 	}
4408 	/* then fill the rest with the lowest possible rate */
4409 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4410 		KASSERT(tab != 0, ("invalid tab"));
4411 		lq->rs_table[i] = htole32(tab);
4412 	}
4413 }
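/*
 * Illustration of the table built above (rate set assumed): for an 11g
 * node with ni_rates = {1, 2, 5.5, 6, 9, 11, 12, 18, 24, 36, 48, 54} Mbps
 * and rix selecting 24 Mbps, rs_table[0..8] hold the encodings for 24
 * down to 1 Mbps in descending order, and the remaining entries repeat
 * the 1 Mbps encoding.
 */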
4414 
4415 static int
4416 iwm_media_change(struct ifnet *ifp)
4417 {
4418 	struct ieee80211vap *vap = ifp->if_softc;
4419 	struct ieee80211com *ic = vap->iv_ic;
4420 	struct iwm_softc *sc = ic->ic_softc;
4421 	int error;
4422 
4423 	error = ieee80211_media_change(ifp);
4424 	if (error != ENETRESET)
4425 		return error;
4426 
4427 	IWM_LOCK(sc);
4428 	if (ic->ic_nrunning > 0) {
4429 		iwm_stop(sc);
4430 		iwm_init(sc);
4431 	}
4432 	IWM_UNLOCK(sc);
4433 	return error;
4434 }
4435 
4436 static void
4437 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4438 {
4439 	struct iwm_vap *ivp = IWM_VAP(vap);
4440 	int error;
4441 
4442 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4443 	sc->sc_tx_timer = 0;
4444 
4445 	ivp->iv_auth = 0;
4446 	if (sc->sc_firmware_state == 3) {
4447 		iwm_xmit_queue_drain(sc);
4448 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4449 		error = iwm_rm_sta(sc, vap, TRUE);
4450 		if (error) {
4451 			device_printf(sc->sc_dev,
4452 			    "%s: Failed to remove station: %d\n",
4453 			    __func__, error);
4454 		}
4455 	}
4456 	if (sc->sc_firmware_state == 3) {
4457 		error = iwm_mac_ctxt_changed(sc, vap);
4458 		if (error) {
4459 			device_printf(sc->sc_dev,
4460 			    "%s: Failed to change mac context: %d\n",
4461 			    __func__, error);
4462 		}
4463 	}
4464 	if (sc->sc_firmware_state == 3) {
4465 		error = iwm_sf_update(sc, vap, FALSE);
4466 		if (error) {
4467 			device_printf(sc->sc_dev,
4468 			    "%s: Failed to update smart FIFO: %d\n",
4469 			    __func__, error);
4470 		}
4471 	}
4472 	if (sc->sc_firmware_state == 3) {
4473 		error = iwm_rm_sta_id(sc, vap);
4474 		if (error) {
4475 			device_printf(sc->sc_dev,
4476 			    "%s: Failed to remove station id: %d\n",
4477 			    __func__, error);
4478 		}
4479 	}
4480 	if (sc->sc_firmware_state == 3) {
4481 		error = iwm_update_quotas(sc, NULL);
4482 		if (error) {
4483 			device_printf(sc->sc_dev,
4484 			    "%s: Failed to update PHY quota: %d\n",
4485 			    __func__, error);
4486 		}
4487 	}
4488 	if (sc->sc_firmware_state == 3) {
4489 		/* XXX Might need to specify bssid correctly. */
4490 		error = iwm_mac_ctxt_changed(sc, vap);
4491 		if (error) {
4492 			device_printf(sc->sc_dev,
4493 			    "%s: Failed to change mac context: %d\n",
4494 			    __func__, error);
4495 		}
4496 	}
4497 	if (sc->sc_firmware_state == 3) {
4498 		sc->sc_firmware_state = 2;
4499 	}
4500 	if (sc->sc_firmware_state > 1) {
4501 		error = iwm_binding_remove_vif(sc, ivp);
4502 		if (error) {
4503 			device_printf(sc->sc_dev,
4504 			    "%s: Failed to remove channel ctx: %d\n",
4505 			    __func__, error);
4506 		}
4507 	}
4508 	if (sc->sc_firmware_state > 1) {
4509 		sc->sc_firmware_state = 1;
4510 	}
4511 	ivp->phy_ctxt = NULL;
4512 	if (sc->sc_firmware_state > 0) {
4513 		error = iwm_mac_ctxt_changed(sc, vap);
4514 		if (error) {
4515 			device_printf(sc->sc_dev,
4516 			    "%s: Failed to change mac context: %d\n",
4517 			    __func__, error);
4518 		}
4519 	}
4520 	if (sc->sc_firmware_state > 0) {
4521 		error = iwm_power_update_mac(sc);
4522 		if (error != 0) {
4523 			device_printf(sc->sc_dev,
4524 			    "%s: failed to update power management\n",
4525 			    __func__);
4526 		}
4527 	}
4528 	sc->sc_firmware_state = 0;
4529 }
4530 
4531 static int
4532 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4533 {
4534 	struct iwm_vap *ivp = IWM_VAP(vap);
4535 	struct ieee80211com *ic = vap->iv_ic;
4536 	struct iwm_softc *sc = ic->ic_softc;
4537 	struct iwm_node *in;
4538 	int error;
4539 
4540 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4541 	    "switching state %s -> %s arg=0x%x\n",
4542 	    ieee80211_state_name[vap->iv_state],
4543 	    ieee80211_state_name[nstate],
4544 	    arg);
4545 
4546 	IEEE80211_UNLOCK(ic);
4547 	IWM_LOCK(sc);
4548 
4549 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4550 	    (nstate == IEEE80211_S_AUTH ||
4551 	     nstate == IEEE80211_S_ASSOC ||
4552 	     nstate == IEEE80211_S_RUN)) {
4553 		/* Stop blinking for a scan, when authenticating. */
4554 		iwm_led_blink_stop(sc);
4555 	}
4556 
4557 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4558 		iwm_led_disable(sc);
4559 		/* disable beacon filtering if we're hopping out of RUN */
4560 		iwm_disable_beacon_filter(sc);
4561 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4562 			in->in_assoc = 0;
4563 	}
4564 
4565 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4566 	     vap->iv_state == IEEE80211_S_ASSOC ||
4567 	     vap->iv_state == IEEE80211_S_RUN) &&
4568 	    (nstate == IEEE80211_S_INIT ||
4569 	     nstate == IEEE80211_S_SCAN ||
4570 	     nstate == IEEE80211_S_AUTH)) {
4571 		iwm_stop_session_protection(sc, ivp);
4572 	}
4573 
4574 	if ((vap->iv_state == IEEE80211_S_RUN ||
4575 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4576 	    nstate == IEEE80211_S_INIT) {
4577 		/*
4578 		 * In this case, iv_newstate() wants to send an 80211 frame on
4579 		 * the network that we are leaving. So we need to call it,
4580 		 * before tearing down all the firmware state.
4581 		 */
4582 		IWM_UNLOCK(sc);
4583 		IEEE80211_LOCK(ic);
4584 		ivp->iv_newstate(vap, nstate, arg);
4585 		IEEE80211_UNLOCK(ic);
4586 		IWM_LOCK(sc);
4587 		iwm_bring_down_firmware(sc, vap);
4588 		IWM_UNLOCK(sc);
4589 		IEEE80211_LOCK(ic);
4590 		return 0;
4591 	}
4592 
4593 	switch (nstate) {
4594 	case IEEE80211_S_INIT:
4595 	case IEEE80211_S_SCAN:
4596 		break;
4597 
4598 	case IEEE80211_S_AUTH:
4599 		iwm_bring_down_firmware(sc, vap);
4600 		if ((error = iwm_auth(vap, sc)) != 0) {
4601 			device_printf(sc->sc_dev,
4602 			    "%s: could not move to auth state: %d\n",
4603 			    __func__, error);
4604 			iwm_bring_down_firmware(sc, vap);
4605 			IWM_UNLOCK(sc);
4606 			IEEE80211_LOCK(ic);
4607 			return 1;
4608 		}
4609 		break;
4610 
4611 	case IEEE80211_S_ASSOC:
4612 		/*
4613 		 * EBS may be disabled due to previous failures reported by FW.
4614 		 * Reset EBS status here assuming environment has been changed.
4615 		 */
4616 		sc->last_ebs_successful = TRUE;
4617 		break;
4618 
4619 	case IEEE80211_S_RUN:
4620 		in = IWM_NODE(vap->iv_bss);
4621 		/* Update the association state, now we have it all */
4622 		/* (e.g. the associd arrives at this point). */
4623 		error = iwm_update_sta(sc, in);
4624 		if (error != 0) {
4625 			device_printf(sc->sc_dev,
4626 			    "%s: failed to update STA\n", __func__);
4627 			IWM_UNLOCK(sc);
4628 			IEEE80211_LOCK(ic);
4629 			return error;
4630 		}
4631 		in->in_assoc = 1;
4632 		error = iwm_mac_ctxt_changed(sc, vap);
4633 		if (error != 0) {
4634 			device_printf(sc->sc_dev,
4635 			    "%s: failed to update MAC: %d\n", __func__, error);
4636 		}
4637 
4638 		iwm_sf_update(sc, vap, FALSE);
4639 		iwm_enable_beacon_filter(sc, ivp);
4640 		iwm_power_update_mac(sc);
4641 		iwm_update_quotas(sc, ivp);
4642 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4643 		iwm_setrates(sc, in, rix);
4644 
4645 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4646 			device_printf(sc->sc_dev,
4647 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4648 		}
4649 
4650 		iwm_led_enable(sc);
4651 		break;
4652 
4653 	default:
4654 		break;
4655 	}
4656 	IWM_UNLOCK(sc);
4657 	IEEE80211_LOCK(ic);
4658 
4659 	return (ivp->iv_newstate(vap, nstate, arg));
4660 }
4661 
4662 void
4663 iwm_endscan_cb(void *arg, int pending)
4664 {
4665 	struct iwm_softc *sc = arg;
4666 	struct ieee80211com *ic = &sc->sc_ic;
4667 
4668 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4669 	    "%s: scan ended\n",
4670 	    __func__);
4671 
4672 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4673 }
4674 
4675 static int
4676 iwm_send_bt_init_conf(struct iwm_softc *sc)
4677 {
4678 	struct iwm_bt_coex_cmd bt_cmd;
4679 
4680 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4681 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4682 
4683 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4684 	    &bt_cmd);
4685 }
4686 
4687 static boolean_t
4688 iwm_is_lar_supported(struct iwm_softc *sc)
4689 {
4690 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4691 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4692 
4693 	if (iwm_lar_disable)
4694 		return FALSE;
4695 
4696 	/*
4697 	 * Enable LAR only if it is supported by the FW (TLV) &&
4698 	 * enabled in the NVM
4699 	 */
4700 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4701 		return nvm_lar && tlv_lar;
4702 	else
4703 		return tlv_lar;
4704 }
4705 
4706 static boolean_t
4707 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4708 {
4709 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4710 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4711 }
4712 
4713 static int
4714 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4715 {
4716 	struct iwm_mcc_update_cmd mcc_cmd;
4717 	struct iwm_host_cmd hcmd = {
4718 		.id = IWM_MCC_UPDATE_CMD,
4719 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4720 		.data = { &mcc_cmd },
4721 	};
4722 	int ret;
4723 #ifdef IWM_DEBUG
4724 	struct iwm_rx_packet *pkt;
4725 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4726 	struct iwm_mcc_update_resp *mcc_resp;
4727 	int n_channels;
4728 	uint16_t mcc;
4729 #endif
4730 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4731 
4732 	if (!iwm_is_lar_supported(sc)) {
4733 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4734 		    __func__);
4735 		return 0;
4736 	}
4737 
4738 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4739 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
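	/* e.g. alpha2 "ZZ" packs as 'Z' << 8 | 'Z' = 0x5a5a; the world
	 * domain "00" seen in the workaround below is 0x3030. */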
4740 	if (iwm_is_wifi_mcc_supported(sc))
4741 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4742 	else
4743 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4744 
4745 	if (resp_v2)
4746 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4747 	else
4748 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4749 
4750 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4751 	    "send MCC update to FW with '%c%c' src = %d\n",
4752 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4753 
4754 	ret = iwm_send_cmd(sc, &hcmd);
4755 	if (ret)
4756 		return ret;
4757 
4758 #ifdef IWM_DEBUG
4759 	pkt = hcmd.resp_pkt;
4760 
4761 	/* Extract MCC response */
4762 	if (resp_v2) {
4763 		mcc_resp = (void *)pkt->data;
4764 		mcc = mcc_resp->mcc;
4765 		n_channels = le32toh(mcc_resp->n_channels);
4766 	} else {
4767 		mcc_resp_v1 = (void *)pkt->data;
4768 		mcc = mcc_resp_v1->mcc;
4769 		n_channels = le32toh(mcc_resp_v1->n_channels);
4770 	}
4771 
4772 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4773 	if (mcc == 0)
4774 		mcc = 0x3030;  /* "00" - world */
4775 
4776 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4777 	    "regulatory domain '%c%c' (%d channels available)\n",
4778 	    mcc >> 8, mcc & 0xff, n_channels);
4779 #endif
4780 	iwm_free_resp(sc, &hcmd);
4781 
4782 	return 0;
4783 }
4784 
4785 static void
4786 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4787 {
4788 	struct iwm_host_cmd cmd = {
4789 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4790 		.len = { sizeof(uint32_t), },
4791 		.data = { &backoff, },
4792 	};
4793 
4794 	if (iwm_send_cmd(sc, &cmd) != 0) {
4795 		device_printf(sc->sc_dev,
4796 		    "failed to change thermal tx backoff\n");
4797 	}
4798 }
4799 
4800 static int
4801 iwm_init_hw(struct iwm_softc *sc)
4802 {
4803 	struct ieee80211com *ic = &sc->sc_ic;
4804 	int error, i, ac;
4805 
4806 	sc->sf_state = IWM_SF_UNINIT;
4807 
4808 	if ((error = iwm_start_hw(sc)) != 0) {
4809 		printf("iwm_start_hw: failed %d\n", error);
4810 		return error;
4811 	}
4812 
4813 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4814 		printf("iwm_run_init_ucode: failed %d\n", error);
4815 		return error;
4816 	}
4817 
4818 	/*
4819 	 * We must stop and restart the hardware now that the INIT
4820 	 * image has finished running.
4821 	 */
4822 	iwm_stop_device(sc);
4823 	sc->sc_ps_disabled = FALSE;
4824 	if ((error = iwm_start_hw(sc)) != 0) {
4825 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4826 		return error;
4827 	}
4828 
4829 	/* restart, this time with the regular firmware */
4830 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4831 	if (error) {
4832 		device_printf(sc->sc_dev, "could not load firmware\n");
4833 		goto error;
4834 	}
4835 
4836 	error = iwm_sf_update(sc, NULL, FALSE);
4837 	if (error)
4838 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4839 
4840 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4841 		device_printf(sc->sc_dev, "bt init conf failed\n");
4842 		goto error;
4843 	}
4844 
4845 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4846 	if (error != 0) {
4847 		device_printf(sc->sc_dev, "antenna config failed\n");
4848 		goto error;
4849 	}
4850 
4851 	/* Send phy db control command and then phy db calibration */
4852 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4853 		goto error;
4854 
4855 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4856 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4857 		goto error;
4858 	}
4859 
4860 	/* Add auxiliary station for scanning */
4861 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4862 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4863 		goto error;
4864 	}
4865 
4866 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4867 		/*
4868 		 * The channel used here isn't relevant as it's
4869 		 * going to be overwritten in the other flows.
4870 		 * For now use the first channel we have.
4871 		 */
4872 		if ((error = iwm_phy_ctxt_add(sc,
4873 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4874 			goto error;
4875 	}
4876 
4877 	/* Initialize tx backoffs to the minimum. */
4878 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4879 		iwm_tt_tx_backoff(sc, 0);
4880 
4881 	if (iwm_config_ltr(sc) != 0)
4882 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4883 
4884 	error = iwm_power_update_device(sc);
4885 	if (error)
4886 		goto error;
4887 
4888 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4889 		goto error;
4890 
4891 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4892 		if ((error = iwm_config_umac_scan(sc)) != 0)
4893 			goto error;
4894 	}
4895 
4896 	/* Enable Tx queues. */
4897 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4898 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4899 		    iwm_ac_to_tx_fifo[ac]);
4900 		if (error)
4901 			goto error;
4902 	}
4903 
4904 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4905 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4906 		goto error;
4907 	}
4908 
4909 	return 0;
4910 
4911  error:
4912 	iwm_stop_device(sc);
4913 	return error;
4914 }
4915 
4916 /* Allow multicast from our BSSID. */
4917 static int
4918 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4919 {
4920 	struct ieee80211_node *ni = vap->iv_bss;
4921 	struct iwm_mcast_filter_cmd *cmd;
4922 	size_t size;
4923 	int error;
4924 
4925 	size = roundup(sizeof(*cmd), 4);
4926 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4927 	if (cmd == NULL)
4928 		return ENOMEM;
4929 	cmd->filter_own = 1;
4930 	cmd->port_id = 0;
4931 	cmd->count = 0;
4932 	cmd->pass_all = 1;
4933 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4934 
4935 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4936 	    IWM_CMD_SYNC, size, cmd);
4937 	free(cmd, M_DEVBUF);
4938 
4939 	return (error);
4940 }
4941 
4942 /*
4943  * ifnet interfaces
4944  */
4945 
4946 static void
4947 iwm_init(struct iwm_softc *sc)
4948 {
4949 	int error;
4950 
4951 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4952 		return;
4953 	}
4954 	sc->sc_generation++;
4955 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4956 
4957 	if ((error = iwm_init_hw(sc)) != 0) {
4958 		printf("iwm_init_hw failed %d\n", error);
4959 		iwm_stop(sc);
4960 		return;
4961 	}
4962 
4963 	/*
4964 	 * Ok, firmware loaded and we are jogging
4965 	 */
4966 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4967 }
4968 
4969 static int
4970 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4971 {
4972 	struct iwm_softc *sc;
4973 	int error;
4974 
4975 	sc = ic->ic_softc;
4976 
4977 	IWM_LOCK(sc);
4978 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4979 		IWM_UNLOCK(sc);
4980 		return (ENXIO);
4981 	}
4982 	error = mbufq_enqueue(&sc->sc_snd, m);
4983 	if (error) {
4984 		IWM_UNLOCK(sc);
4985 		return (error);
4986 	}
4987 	iwm_start(sc);
4988 	IWM_UNLOCK(sc);
4989 	return (0);
4990 }
4991 
4992 /*
4993  * Dequeue packets from sendq and call send.
4994  */
4995 static void
4996 iwm_start(struct iwm_softc *sc)
4997 {
4998 	struct ieee80211_node *ni;
4999 	struct mbuf *m;
5000 	int ac = 0;
5001 
5002 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5003 	while (sc->qfullmsk == 0 &&
5004 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5005 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5006 		if (iwm_tx(sc, m, ni, ac) != 0) {
5007 			if_inc_counter(ni->ni_vap->iv_ifp,
5008 			    IFCOUNTER_OERRORS, 1);
5009 			ieee80211_free_node(ni);
5010 			continue;
5011 		}
5012 		if (sc->sc_tx_timer == 0) {
5013 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
5014 			    sc);
5015 		}
5016 		sc->sc_tx_timer = 15;
5017 	}
5018 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5019 }
5020 
5021 static void
5022 iwm_stop(struct iwm_softc *sc)
5023 {
5024 
5025 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5026 	sc->sc_flags |= IWM_FLAG_STOPPED;
5027 	sc->sc_generation++;
5028 	iwm_led_blink_stop(sc);
5029 	sc->sc_tx_timer = 0;
5030 	iwm_stop_device(sc);
5031 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5032 }
5033 
5034 static void
5035 iwm_watchdog(void *arg)
5036 {
5037 	struct iwm_softc *sc = arg;
5038 	struct ieee80211com *ic = &sc->sc_ic;
5039 
5040 	if (sc->sc_attached == 0)
5041 		return;
5042 
5043 	if (sc->sc_tx_timer > 0) {
5044 		if (--sc->sc_tx_timer == 0) {
5045 			device_printf(sc->sc_dev, "device timeout\n");
5046 #ifdef IWM_DEBUG
5047 			iwm_nic_error(sc);
5048 #endif
5049 			ieee80211_restart_all(ic);
5050 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5051 			return;
5052 		}
5053 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5054 	}
5055 }
5056 
5057 static void
5058 iwm_parent(struct ieee80211com *ic)
5059 {
5060 	struct iwm_softc *sc = ic->ic_softc;
5061 	int startall = 0;
5062 
5063 	IWM_LOCK(sc);
5064 	if (ic->ic_nrunning > 0) {
5065 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5066 			iwm_init(sc);
5067 			startall = 1;
5068 		}
5069 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5070 		iwm_stop(sc);
5071 	IWM_UNLOCK(sc);
5072 	if (startall)
5073 		ieee80211_start_all(ic);
5074 }
5075 
5076 /*
5077  * The interrupt side of things
5078  */
5079 
5080 /*
5081  * error dumping routines are from iwlwifi/mvm/utils.c
5082  */
5083 
5084 /*
5085  * Note: This structure is read from the device with IO accesses,
5086  * and the reading already does the endian conversion. As it is
5087  * read with uint32_t-sized accesses, any members with a different size
5088  * need to be ordered correctly though!
5089  */
5090 struct iwm_error_event_table {
5091 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5092 	uint32_t error_id;		/* type of error */
5093 	uint32_t trm_hw_status0;	/* TRM HW status */
5094 	uint32_t trm_hw_status1;	/* TRM HW status */
5095 	uint32_t blink2;		/* branch link */
5096 	uint32_t ilink1;		/* interrupt link */
5097 	uint32_t ilink2;		/* interrupt link */
5098 	uint32_t data1;		/* error-specific data */
5099 	uint32_t data2;		/* error-specific data */
5100 	uint32_t data3;		/* error-specific data */
5101 	uint32_t bcon_time;		/* beacon timer */
5102 	uint32_t tsf_low;		/* network timestamp function timer */
5103 	uint32_t tsf_hi;		/* network timestamp function timer */
5104 	uint32_t gp1;		/* GP1 timer register */
5105 	uint32_t gp2;		/* GP2 timer register */
5106 	uint32_t fw_rev_type;	/* firmware revision type */
5107 	uint32_t major;		/* uCode version major */
5108 	uint32_t minor;		/* uCode version minor */
5109 	uint32_t hw_ver;		/* HW Silicon version */
5110 	uint32_t brd_ver;		/* HW board version */
5111 	uint32_t log_pc;		/* log program counter */
5112 	uint32_t frame_ptr;		/* frame pointer */
5113 	uint32_t stack_ptr;		/* stack pointer */
5114 	uint32_t hcmd;		/* last host command header */
5115 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5116 				 * rxtx_flag */
5117 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5118 				 * host_flag */
5119 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5120 				 * enc_flag */
5121 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5122 				 * time_flag */
5123 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5124 				 * wico interrupt */
5125 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5126 	uint32_t wait_event;		/* wait event() caller address */
5127 	uint32_t l2p_control;	/* L2pControlField */
5128 	uint32_t l2p_duration;	/* L2pDurationField */
5129 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5130 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5131 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5132 				 * (LMPM_PMG_SEL) */
5133 	uint32_t u_timestamp;	/* indicates the date and time of
5134 				 * compilation */
5135 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5136 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5137 
5138 /*
5139  * UMAC error struct - relevant starting from family 8000 chip.
5140  * Note: This structure is read from the device with IO accesses,
5141  * and the reading already does the endian conversion. As it is
5142  * read with u32-sized accesses, any members with a different size
5143  * need to be ordered correctly though!
5144  */
5145 struct iwm_umac_error_event_table {
5146 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5147 	uint32_t error_id;	/* type of error */
5148 	uint32_t blink1;	/* branch link */
5149 	uint32_t blink2;	/* branch link */
5150 	uint32_t ilink1;	/* interrupt link */
5151 	uint32_t ilink2;	/* interrupt link */
5152 	uint32_t data1;		/* error-specific data */
5153 	uint32_t data2;		/* error-specific data */
5154 	uint32_t data3;		/* error-specific data */
5155 	uint32_t umac_major;
5156 	uint32_t umac_minor;
5157 	uint32_t frame_pointer;	/* core register 27*/
5158 	uint32_t stack_pointer;	/* core register 28 */
5159 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5160 	uint32_t nic_isr_pref;	/* ISR status register */
5161 } __packed;
5162 
5163 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5164 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5165 
5166 #ifdef IWM_DEBUG
5167 struct {
5168 	const char *name;
5169 	uint8_t num;
5170 } advanced_lookup[] = {
5171 	{ "NMI_INTERRUPT_WDG", 0x34 },
5172 	{ "SYSASSERT", 0x35 },
5173 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5174 	{ "BAD_COMMAND", 0x38 },
5175 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5176 	{ "FATAL_ERROR", 0x3D },
5177 	{ "NMI_TRM_HW_ERR", 0x46 },
5178 	{ "NMI_INTERRUPT_TRM", 0x4C },
5179 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5180 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5181 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5182 	{ "NMI_INTERRUPT_HOST", 0x66 },
5183 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5184 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5185 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5186 	{ "ADVANCED_SYSASSERT", 0 },
5187 };
5188 
5189 static const char *
5190 iwm_desc_lookup(uint32_t num)
5191 {
5192 	int i;
5193 
5194 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5195 		if (advanced_lookup[i].num == num)
5196 			return advanced_lookup[i].name;
5197 
5198 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5199 	return advanced_lookup[i].name;
5200 }
5201 
5202 static void
5203 iwm_nic_umac_error(struct iwm_softc *sc)
5204 {
5205 	struct iwm_umac_error_event_table table;
5206 	uint32_t base;
5207 
5208 	base = sc->umac_error_event_table;
5209 
5210 	if (base < 0x800000) {
5211 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5212 		    base);
5213 		return;
5214 	}
5215 
5216 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5217 		device_printf(sc->sc_dev, "reading errlog failed\n");
5218 		return;
5219 	}
5220 
5221 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5222 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5223 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5224 		    sc->sc_flags, table.valid);
5225 	}
5226 
5227 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5228 		iwm_desc_lookup(table.error_id));
5229 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5230 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5231 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5232 	    table.ilink1);
5233 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5234 	    table.ilink2);
5235 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5236 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5237 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5238 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5239 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5240 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5241 	    table.frame_pointer);
5242 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5243 	    table.stack_pointer);
5244 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5245 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5246 	    table.nic_isr_pref);
5247 }
5248 
5249 /*
5250  * Support for dumping the error log seemed like a good idea ...
5251  * but it's mostly hex junk and the only sensible thing is the
5252  * hw/ucode revision (which we know anyway).  Since it's here,
5253  * I'll just leave it in, just in case e.g. the Intel guys want to
5254  * help us decipher some "ADVANCED_SYSASSERT" later.
5255  */
5256 static void
5257 iwm_nic_error(struct iwm_softc *sc)
5258 {
5259 	struct iwm_error_event_table table;
5260 	uint32_t base;
5261 
5262 	device_printf(sc->sc_dev, "dumping device error log\n");
5263 	base = sc->error_event_table[0];
5264 	if (base < 0x800000) {
5265 		device_printf(sc->sc_dev,
5266 		    "Invalid error log pointer 0x%08x\n", base);
5267 		return;
5268 	}
5269 
5270 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5271 		device_printf(sc->sc_dev, "reading errlog failed\n");
5272 		return;
5273 	}
5274 
5275 	if (!table.valid) {
5276 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5277 		return;
5278 	}
5279 
5280 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5281 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5282 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5283 		    sc->sc_flags, table.valid);
5284 	}
5285 
5286 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5287 	    iwm_desc_lookup(table.error_id));
5288 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5289 	    table.trm_hw_status0);
5290 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5291 	    table.trm_hw_status1);
5292 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5293 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5294 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5295 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5296 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5297 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5298 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5299 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5300 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5301 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5302 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5303 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5304 	    table.fw_rev_type);
5305 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5306 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5307 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5308 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5309 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5310 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5311 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5312 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5313 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5314 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5315 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5316 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5317 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5318 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5319 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5320 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5321 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5322 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5323 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5324 
5325 	if (sc->umac_error_event_table)
5326 		iwm_nic_umac_error(sc);
5327 }
5328 #endif
5329 
5330 static void
5331 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5332 {
5333 	struct ieee80211com *ic = &sc->sc_ic;
5334 	struct iwm_cmd_response *cresp;
5335 	struct mbuf *m1;
5336 	uint32_t offset = 0;
5337 	uint32_t maxoff = IWM_RBUF_SIZE;
5338 	uint32_t nextoff;
5339 	boolean_t stolen = FALSE;
5340 
5341 #define HAVEROOM(a)	\
5342     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5343 
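	/*
	 * A single RX buffer can hold several packets, each prefixed by an
	 * iwm_cmd_header and padded out to IWM_FH_RSCSR_FRAME_ALIGN; walk
	 * them until we run out of room or hit an invalid frame.
	 */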
5344 	while (HAVEROOM(offset)) {
5345 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5346 		    offset);
5347 		int qid, idx, code, len;
5348 
5349 		qid = pkt->hdr.qid;
5350 		idx = pkt->hdr.idx;
5351 
5352 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5353 
5354 		/*
5355 		 * We occasionally get these from the firmware; no idea why.
5356 		 * They at least seem harmless, so just ignore them for now.
5357 		 */
5358 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5359 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5360 			break;
5361 		}
5362 
5363 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5364 		    "rx packet qid=%d idx=%d type=%x\n",
5365 		    qid & ~0x80, pkt->hdr.idx, code);
5366 
5367 		len = iwm_rx_packet_len(pkt);
5368 		len += sizeof(uint32_t); /* account for status word */
5369 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5370 
5371 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5372 
5373 		switch (code) {
5374 		case IWM_REPLY_RX_PHY_CMD:
5375 			iwm_rx_rx_phy_cmd(sc, pkt);
5376 			break;
5377 
5378 		case IWM_REPLY_RX_MPDU_CMD: {
5379 			/*
5380 			 * If this is the last frame in the RX buffer, we
5381 			 * can directly feed the mbuf to the sharks here.
5382 			 */
5383 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5384 			    struct iwm_rx_packet *, nextoff);
5385 			if (!HAVEROOM(nextoff) ||
5386 			    (nextpkt->hdr.code == 0 &&
5387 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5388 			     nextpkt->hdr.idx == 0) ||
5389 			    (nextpkt->len_n_flags ==
5390 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5391 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5392 					stolen = FALSE;
5393 					/* Make sure we abort the loop */
5394 					nextoff = maxoff;
5395 				}
5396 				break;
5397 			}
5398 
5399 			/*
5400 			 * Use m_copym instead of m_split, because that
5401 			 * makes it easier to keep a valid rx buffer in
5402 			 * the ring, when iwm_rx_mpdu() fails.
5403 			 *
5404 			 * We need to start m_copym() at offset 0, to get the
5405 			 * M_PKTHDR flag preserved.
5406 			 */
5407 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5408 			if (m1) {
5409 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5410 					stolen = TRUE;
5411 				else
5412 					m_freem(m1);
5413 			}
5414 			break;
5415 		}
5416 
5417 		case IWM_TX_CMD:
5418 			iwm_rx_tx_cmd(sc, pkt);
5419 			break;
5420 
5421 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5422 			struct iwm_missed_beacons_notif *resp;
5423 			int missed;
5424 
5425 			/* XXX look at mac_id to determine interface ID */
5426 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5427 
5428 			resp = (void *)pkt->data;
5429 			missed = le32toh(resp->consec_missed_beacons);
5430 
5431 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5432 			    "%s: MISSED_BEACON: mac_id=%d, "
5433 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5434 			    "num_rx=%d\n",
5435 			    __func__,
5436 			    le32toh(resp->mac_id),
5437 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5438 			    le32toh(resp->consec_missed_beacons),
5439 			    le32toh(resp->num_expected_beacons),
5440 			    le32toh(resp->num_recvd_beacons));
5441 
5442 			/* Be paranoid */
5443 			if (vap == NULL)
5444 				break;
5445 
5446 			/* XXX no net80211 locking? */
5447 			if (vap->iv_state == IEEE80211_S_RUN &&
5448 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5449 				if (missed > vap->iv_bmissthreshold) {
5450 					/* XXX bad locking; turn into task */
5451 					IWM_UNLOCK(sc);
5452 					ieee80211_beacon_miss(ic);
5453 					IWM_LOCK(sc);
5454 				}
5455 			}
5456 
5457 			break;
5458 		}
5459 
5460 		case IWM_MFUART_LOAD_NOTIFICATION:
5461 			break;
5462 
5463 		case IWM_ALIVE:
5464 			break;
5465 
5466 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5467 			break;
5468 
5469 		case IWM_STATISTICS_NOTIFICATION:
5470 			iwm_handle_rx_statistics(sc, pkt);
5471 			break;
5472 
5473 		case IWM_NVM_ACCESS_CMD:
5474 		case IWM_MCC_UPDATE_CMD:
5475 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5476 				memcpy(sc->sc_cmd_resp,
5477 				    pkt, sizeof(sc->sc_cmd_resp));
5478 			}
5479 			break;
5480 
5481 		case IWM_MCC_CHUB_UPDATE_CMD: {
5482 			struct iwm_mcc_chub_notif *notif;
5483 			notif = (void *)pkt->data;
5484 
5485 			sc->sc_fw_mcc[0] = (le16toh(notif->mcc) & 0xff00) >> 8;
5486 			sc->sc_fw_mcc[1] = le16toh(notif->mcc) & 0xff;
5487 			sc->sc_fw_mcc[2] = '\0';
5488 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5489 			    "fw source %d sent CC '%s'\n",
5490 			    notif->source_id, sc->sc_fw_mcc);
5491 			break;
5492 		}
5493 
5494 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5495 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5496 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5497 			struct iwm_dts_measurement_notif_v1 *notif;
5498 
5499 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5500 				device_printf(sc->sc_dev,
5501 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5502 				break;
5503 			}
5504 			notif = (void *)pkt->data;
5505 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5506 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5507 			    notif->temp);
5508 			break;
5509 		}
5510 
5511 		case IWM_PHY_CONFIGURATION_CMD:
5512 		case IWM_TX_ANT_CONFIGURATION_CMD:
5513 		case IWM_ADD_STA:
5514 		case IWM_MAC_CONTEXT_CMD:
5515 		case IWM_REPLY_SF_CFG_CMD:
5516 		case IWM_POWER_TABLE_CMD:
5517 		case IWM_LTR_CONFIG:
5518 		case IWM_PHY_CONTEXT_CMD:
5519 		case IWM_BINDING_CONTEXT_CMD:
5520 		case IWM_TIME_EVENT_CMD:
5521 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5522 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5523 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5524 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5525 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5526 		case IWM_REPLY_BEACON_FILTERING_CMD:
5527 		case IWM_MAC_PM_POWER_TABLE:
5528 		case IWM_TIME_QUOTA_CMD:
5529 		case IWM_REMOVE_STA:
5530 		case IWM_TXPATH_FLUSH:
5531 		case IWM_LQ_CMD:
5532 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5533 				 IWM_FW_PAGING_BLOCK_CMD):
5534 		case IWM_BT_CONFIG:
5535 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5536 			cresp = (void *)pkt->data;
5537 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5538 				memcpy(sc->sc_cmd_resp,
5539 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5540 			}
5541 			break;
5542 
5543 		/* ignore */
5544 		case IWM_PHY_DB_CMD:
5545 			break;
5546 
5547 		case IWM_INIT_COMPLETE_NOTIF:
5548 			break;
5549 
5550 		case IWM_SCAN_OFFLOAD_COMPLETE:
5551 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5552 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5553 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5554 				ieee80211_runtask(ic, &sc->sc_es_task);
5555 			}
5556 			break;
5557 
5558 		case IWM_SCAN_ITERATION_COMPLETE:
5559 			/* The notification payload is currently unused. */
5560 			break;
5563 
5564 		case IWM_SCAN_COMPLETE_UMAC:
5565 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5566 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5567 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5568 				ieee80211_runtask(ic, &sc->sc_es_task);
5569 			}
5570 			break;
5571 
5572 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5573 			struct iwm_umac_scan_iter_complete_notif *notif;
5574 			notif = (void *)pkt->data;
5575 
5576 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5577 			    "complete, status=0x%x, %d channels scanned\n",
5578 			    notif->status, notif->scanned_channels);
5579 			break;
5580 		}
5581 
5582 		case IWM_REPLY_ERROR: {
5583 			struct iwm_error_resp *resp;
5584 			resp = (void *)pkt->data;
5585 
5586 			device_printf(sc->sc_dev,
5587 			    "firmware error 0x%x, cmd 0x%x\n",
5588 			    le32toh(resp->error_type),
5589 			    resp->cmd_id);
5590 			break;
5591 		}
5592 
5593 		case IWM_TIME_EVENT_NOTIFICATION:
5594 			iwm_rx_time_event_notif(sc, pkt);
5595 			break;
5596 
5597 		/*
5598 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5599 		 * messages. Just ignore them for now.
5600 		 */
5601 		case IWM_DEBUG_LOG_MSG:
5602 			break;
5603 
5604 		case IWM_MCAST_FILTER_CMD:
5605 			break;
5606 
5607 		case IWM_SCD_QUEUE_CFG: {
5608 			struct iwm_scd_txq_cfg_rsp *rsp;
5609 			rsp = (void *)pkt->data;
5610 
5611 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5612 			    "queue cfg token=0x%x sta_id=%d "
5613 			    "tid=%d scd_queue=%d\n",
5614 			    rsp->token, rsp->sta_id, rsp->tid,
5615 			    rsp->scd_queue);
5616 			break;
5617 		}
5618 
5619 		default:
5620 			device_printf(sc->sc_dev,
5621 			    "frame %d/%d %x UNHANDLED (this should "
5622 			    "not happen)\n", qid & ~0x80, idx,
5623 			    pkt->len_n_flags);
5624 			break;
5625 		}
5626 
5627 		/*
5628 		 * Why test bit 0x80?  The Linux driver:
5629 		 *
5630 		 * There is one exception:  uCode sets bit 15 when it
5631 		 * originates the response/notification, i.e. when the
5632 		 * response/notification is not a direct response to a
5633 		 * command sent by the driver.  For example, uCode issues
5634 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5635 		 * it is not a direct response to any driver command.
5636 		 *
5637 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5638 		 * uses a slightly different format for pkt->hdr, and "qid"
5639 		 * is actually the upper byte of a two-byte field.
5640 		 */
5641 		if (!(qid & (1 << 7)))
5642 			iwm_cmd_done(sc, pkt);
5643 
5644 		offset = nextoff;
5645 	}
5646 	if (stolen)
5647 		m_freem(m);
5648 #undef HAVEROOM
5649 }
5650 
5651 /*
5652  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5653  * Basic structure from if_iwn
5654  */
5655 static void
5656 iwm_notif_intr(struct iwm_softc *sc)
5657 {
5658 	int count;
5659 	uint32_t wreg;
5660 	uint16_t hw;
5661 
5662 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5663 	    BUS_DMASYNC_POSTREAD);
5664 
5665 	if (sc->cfg->mqrx_supported) {
5666 		count = IWM_RX_MQ_RING_COUNT;
5667 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5668 	} else {
5669 		count = IWM_RX_LEGACY_RING_COUNT;
5670 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5671 	}
5672 
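	/*
	 * closed_rb_num is the index of the most recently closed receive
	 * buffer; judging from the mask below, only its low 12 bits are
	 * significant.
	 */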
5673 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5674 
5675 	/*
5676 	 * Process responses
5677 	 */
5678 	while (sc->rxq.cur != hw) {
5679 		struct iwm_rx_ring *ring = &sc->rxq;
5680 		struct iwm_rx_data *data = &ring->data[ring->cur];
5681 
5682 		bus_dmamap_sync(ring->data_dmat, data->map,
5683 		    BUS_DMASYNC_POSTREAD);
5684 
5685 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5686 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5687 		iwm_handle_rxb(sc, data->m);
5688 
5689 		ring->cur = (ring->cur + 1) % count;
5690 	}
5691 
5692 	/*
5693 	 * Tell the firmware that it can reuse the ring entries that
5694 	 * we have just processed.
5695 	 * The hardware seems to get upset unless the write pointer
5696 	 * is aligned to a multiple of 8.
5697 	 */
5698 	hw = (hw == 0) ? count - 1 : hw - 1;
5699 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5700 }
5701 
5702 static void
5703 iwm_intr(void *arg)
5704 {
5705 	struct iwm_softc *sc = arg;
5706 	int handled = 0;
5707 	int r1, r2, rv = 0;
5708 	int isperiodic = 0;
5709 
5710 	IWM_LOCK(sc);
5711 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5712 
5713 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5714 		uint32_t *ict = sc->ict_dma.vaddr;
5715 		int tmp;
5716 
5717 		tmp = le32toh(ict[sc->ict_cur]);
5718 		if (!tmp)
5719 			goto out_ena;
5720 
5721 		/*
5722 		 * ok, there was something.  keep plowing until we have all.
5723 		 */
5724 		r1 = r2 = 0;
5725 		while (tmp) {
5726 			r1 |= tmp;
5727 			ict[sc->ict_cur] = 0;
5728 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5729 			tmp = le32toh(ict[sc->ict_cur]);
5730 		}
5731 
5732 		/* this is where the fun begins.  don't ask */
5733 		if (r1 == 0xffffffff)
5734 			r1 = 0;
5735 
5736 		/*
		 * i am not expected to understand this: the ICT table
		 * appears to store a compressed cause word, so expand it
		 * back into CSR_INT bit positions (low byte to bits 0-7,
		 * next byte to bits 24-31; bits 18-19 also imply the
		 * FH_RX cause after the shift).
		 */
5737 		if (r1 & 0xc0000)
5738 			r1 |= 0x8000;
5739 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5740 	} else {
5741 		r1 = IWM_READ(sc, IWM_CSR_INT);
5742 		/* "hardware gone" (where, fishing?) */
5743 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5744 			goto out;
5745 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5746 	}
5747 	if (r1 == 0 && r2 == 0) {
5748 		goto out_ena;
5749 	}
5750 
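	/*
	 * Acknowledge the causes we just read; writing a bit back to
	 * IWM_CSR_INT clears it.  Causes outside sc_intmask are acked
	 * wholesale, since we never handle them anyway.
	 */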
5751 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5752 
5753 	/* Safely ignore these bits for debug checks below */
5754 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5755 
5756 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5757 		int i;
5758 		struct ieee80211com *ic = &sc->sc_ic;
5759 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5760 
5761 #ifdef IWM_DEBUG
5762 		iwm_nic_error(sc);
5763 #endif
5764 		/* Dump driver status (TX and RX rings) while we're here. */
5765 		device_printf(sc->sc_dev, "driver status:\n");
5766 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5767 			struct iwm_tx_ring *ring = &sc->txq[i];
5768 			device_printf(sc->sc_dev,
5769 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5770 			    "queued=%-3d\n",
5771 			    i, ring->qid, ring->cur, ring->queued);
5772 		}
5773 		device_printf(sc->sc_dev,
5774 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5775 		device_printf(sc->sc_dev,
5776 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5777 
5778 		/* Reset our firmware state tracking. */
5779 		sc->sc_firmware_state = 0;
5780 		/* Don't stop the device; just do a VAP restart */
5781 		IWM_UNLOCK(sc);
5782 
5783 		if (vap == NULL) {
5784 			printf("%s: null vap\n", __func__);
5785 			return;
5786 		}
5787 
5788 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5789 		    "restarting\n", __func__, vap->iv_state);
5790 
5791 		ieee80211_restart_all(ic);
5792 		return;
5793 	}
5794 
5795 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5796 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5797 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5798 		iwm_stop(sc);
5799 		rv = 1;
5800 		goto out;
5801 	}
5802 
5803 	/* firmware chunk loaded */
5804 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5805 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5806 		handled |= IWM_CSR_INT_BIT_FH_TX;
5807 		sc->sc_fw_chunk_done = 1;
5808 		wakeup(&sc->sc_fw);
5809 	}
5810 
5811 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5812 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5813 		if (iwm_check_rfkill(sc)) {
5814 			device_printf(sc->sc_dev,
5815 			    "%s: rfkill switch, disabling interface\n",
5816 			    __func__);
5817 			iwm_stop(sc);
5818 		}
5819 	}
5820 
5821 	/*
5822 	 * The Linux driver uses periodic interrupts to avoid races.
5823 	 * We cargo-cult like it's going out of fashion.
5824 	 */
5825 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5826 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5827 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5828 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5829 			IWM_WRITE_1(sc,
5830 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5831 		isperiodic = 1;
5832 	}
5833 
5834 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5835 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5836 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5837 
5838 		iwm_notif_intr(sc);
5839 
5840 		/* enable periodic interrupt, see above */
5841 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5842 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5843 			    IWM_CSR_INT_PERIODIC_ENA);
5844 	}
5845 
5846 	if (__predict_false(r1 & ~handled))
5847 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5848 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5849 	rv = 1;
5850 
5851  out_ena:
5852 	iwm_restore_interrupts(sc);
5853  out:
5854 	IWM_UNLOCK(sc);
5855 	return;
5856 }
5857 
5858 /*
5859  * Autoconf glue-sniffing
5860  */
5861 #define	PCI_VENDOR_INTEL		0x8086
5862 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5863 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5864 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5865 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5866 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5867 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5868 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5869 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5870 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5871 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5872 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5873 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5874 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5875 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5876 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5877 
5878 static const struct iwm_devices {
5879 	uint16_t		device;
5880 	const struct iwm_cfg	*cfg;
5881 } iwm_devices[] = {
5882 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5883 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5884 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5885 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5886 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5887 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5888 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5889 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5890 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5891 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5892 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5893 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5894 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5895 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5896 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5897 };
5898 
5899 static int
5900 iwm_probe(device_t dev)
5901 {
5902 	int i;
5903 
5904 	for (i = 0; i < nitems(iwm_devices); i++) {
5905 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5906 		    pci_get_device(dev) == iwm_devices[i].device) {
5907 			device_set_desc(dev, iwm_devices[i].cfg->name);
5908 			return (BUS_PROBE_DEFAULT);
5909 		}
5910 	}
5911 
5912 	return (ENXIO);
5913 }
5914 
5915 static int
5916 iwm_dev_check(device_t dev)
5917 {
5918 	struct iwm_softc *sc;
5919 	uint16_t devid;
5920 	int i;
5921 
5922 	sc = device_get_softc(dev);
5923 
5924 	devid = pci_get_device(dev);
5925 	for (i = 0; i < nitems(iwm_devices); i++) {
5926 		if (iwm_devices[i].device == devid) {
5927 			sc->cfg = iwm_devices[i].cfg;
5928 			return (0);
5929 		}
5930 	}
5931 	device_printf(dev, "unknown adapter type\n");
5932 	return (ENXIO);
5933 }
5934 
5935 /* PCI registers */
5936 #define PCI_CFG_RETRY_TIMEOUT	0x041
5937 
5938 static int
5939 iwm_pci_attach(device_t dev)
5940 {
5941 	struct iwm_softc *sc;
5942 	int count, error, rid;
5943 	uint16_t reg;
5944 
5945 	sc = device_get_softc(dev);
5946 
5947 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5948 	 * PCI Tx retries from interfering with C3 CPU state. */
5949 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5950 
5951 	/* Enable bus-mastering and hardware bug workaround. */
5952 	pci_enable_busmaster(dev);
5953 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5954 	/* if !MSI */
5955 	if (reg & PCIM_STATUS_INTxSTATE) {
5956 		reg &= ~PCIM_STATUS_INTxSTATE;
5957 	}
5958 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5959 
5960 	rid = PCIR_BAR(0);
5961 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5962 	    RF_ACTIVE);
5963 	if (sc->sc_mem == NULL) {
5964 		device_printf(sc->sc_dev, "can't map mem space\n");
5965 		return (ENXIO);
5966 	}
5967 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5968 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5969 
5970 	/* Install interrupt handler. */
5971 	count = 1;
5972 	rid = 0;
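	/*
	 * rid 0 selects the legacy INTx line, which may be shared with
	 * other devices; if an MSI message can be allocated, rid 1
	 * selects it instead.
	 */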
5973 	if (pci_alloc_msi(dev, &count) == 0)
5974 		rid = 1;
5975 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5976 	    (rid != 0 ? 0 : RF_SHAREABLE));
5977 	if (sc->sc_irq == NULL) {
5978 		device_printf(dev, "can't map interrupt\n");
5979 		return (ENXIO);
5980 	}
5981 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5982 	    NULL, iwm_intr, sc, &sc->sc_ih);
5983 	if (sc->sc_ih == NULL) {
5984 		device_printf(dev, "can't establish interrupt");
5985 			return (ENXIO);
5986 	}
5987 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5988 
5989 	return (0);
5990 }
5991 
5992 static void
5993 iwm_pci_detach(device_t dev)
5994 {
5995 	struct iwm_softc *sc = device_get_softc(dev);
5996 
5997 	if (sc->sc_irq != NULL) {
5998 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5999 		bus_release_resource(dev, SYS_RES_IRQ,
6000 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6001 		pci_release_msi(dev);
6002 	}
6003 	if (sc->sc_mem != NULL)
6004 		bus_release_resource(dev, SYS_RES_MEMORY,
6005 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6006 }
6007 
6008 static int
6009 iwm_attach(device_t dev)
6010 {
6011 	struct iwm_softc *sc = device_get_softc(dev);
6012 	struct ieee80211com *ic = &sc->sc_ic;
6013 	int error;
6014 	int txq_i, i;
6015 
6016 	sc->sc_dev = dev;
6017 	sc->sc_attached = 1;
6018 	IWM_LOCK_INIT(sc);
6019 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6020 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6021 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
6022 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6023 
6024 	error = iwm_dev_check(dev);
6025 	if (error != 0)
6026 		goto fail;
6027 
6028 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6029 	if (sc->sc_notif_wait == NULL) {
6030 		device_printf(dev, "failed to init notification wait struct\n");
6031 		goto fail;
6032 	}
6033 
6034 	sc->sf_state = IWM_SF_UNINIT;
6035 
6036 	/* Init phy db */
6037 	sc->sc_phy_db = iwm_phy_db_init(sc);
6038 	if (!sc->sc_phy_db) {
6039 		device_printf(dev, "Cannot init phy_db\n");
6040 		goto fail;
6041 	}
6042 
6043 	/* Assume EBS succeeds until the firmware states otherwise. */
6044 	sc->last_ebs_successful = TRUE;
6045 
6046 	/* PCI attach */
6047 	error = iwm_pci_attach(dev);
6048 	if (error != 0)
6049 		goto fail;
6050 
6051 	sc->sc_wantresp = -1;
6052 
6053 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6054 	/*
6055 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6056 	 * changed, and now the revision step also includes bits 0-1 (no more
6057 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6058 	 * in the old format.
6059 	 */
6060 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6061 		int ret;
6062 		uint32_t hw_step;
6063 
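		/*
		 * This moves the step from bits 0-1 into the old "dash"
		 * position (bits 2-3).  For example, assuming
		 * IWM_CSR_HW_REV_STEP() extracts bits 2-3 of its argument,
		 * a raw revision of 0x231 would be stored as 0x234.
		 */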
6064 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6065 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6066 
6067 		if (iwm_prepare_card_hw(sc) != 0) {
6068 			device_printf(dev, "could not initialize hardware\n");
6069 			goto fail;
6070 		}
6071 
6072 		/*
6073 		 * To recognize a C-step part, the driver must read the
6074 		 * chip version id located at the AUX bus MISC address.
6075 		 */
6076 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6077 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6078 		DELAY(2);
6079 
6080 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6081 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6082 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6083 				   25000);
6084 		if (!ret) {
6085 			device_printf(sc->sc_dev,
6086 			    "Failed to wake up the nic\n");
6087 			goto fail;
6088 		}
6089 
6090 		if (iwm_nic_lock(sc)) {
6091 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6092 			hw_step |= IWM_ENABLE_WFPM;
6093 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6094 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6095 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6096 			if (hw_step == 0x3)
6097 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6098 						(IWM_SILICON_C_STEP << 2);
6099 			iwm_nic_unlock(sc);
6100 		} else {
6101 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6102 			goto fail;
6103 		}
6104 	}
6105 
6106 	/* special-case 7265D, it has the same PCI IDs. */
6107 	if (sc->cfg == &iwm7265_cfg &&
6108 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6109 		sc->cfg = &iwm7265d_cfg;
6110 	}
6111 
6112 	/* Allocate DMA memory for firmware transfers. */
6113 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6114 		device_printf(dev, "could not allocate memory for firmware\n");
6115 		goto fail;
6116 	}
6117 
6118 	/* Allocate "Keep Warm" page. */
6119 	if ((error = iwm_alloc_kw(sc)) != 0) {
6120 		device_printf(dev, "could not allocate keep warm page\n");
6121 		goto fail;
6122 	}
6123 
6124 	/* We use ICT interrupts */
6125 	if ((error = iwm_alloc_ict(sc)) != 0) {
6126 		device_printf(dev, "could not allocate ICT table\n");
6127 		goto fail;
6128 	}
6129 
6130 	/* Allocate TX scheduler "rings". */
6131 	if ((error = iwm_alloc_sched(sc)) != 0) {
6132 		device_printf(dev, "could not allocate TX scheduler rings\n");
6133 		goto fail;
6134 	}
6135 
6136 	/* Allocate TX rings */
6137 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6138 		if ((error = iwm_alloc_tx_ring(sc,
6139 		    &sc->txq[txq_i], txq_i)) != 0) {
6140 			device_printf(dev,
6141 			    "could not allocate TX ring %d\n",
6142 			    txq_i);
6143 			goto fail;
6144 		}
6145 	}
6146 
6147 	/* Allocate RX ring. */
6148 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6149 		device_printf(dev, "could not allocate RX ring\n");
6150 		goto fail;
6151 	}
6152 
6153 	/* Clear pending interrupts. */
6154 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6155 
6156 	ic->ic_softc = sc;
6157 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6158 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6159 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6160 
6161 	/* Set device capabilities. */
6162 	ic->ic_caps =
6163 	    IEEE80211_C_STA |
6164 	    IEEE80211_C_WPA |		/* WPA/RSN */
6165 	    IEEE80211_C_WME |
6166 	    IEEE80211_C_PMGT |
6167 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6168 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6169 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6170 	    ;
6171 	/* Advertise full-offload scanning */
6172 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6173 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6174 		sc->sc_phyctxt[i].id = i;
6175 		sc->sc_phyctxt[i].color = 0;
6176 		sc->sc_phyctxt[i].ref = 0;
6177 		sc->sc_phyctxt[i].channel = NULL;
6178 	}
6179 
6180 	/* Default noise floor */
6181 	sc->sc_noise = -96;
6182 
6183 	/* Max RSSI */
6184 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6185 
6186 #ifdef IWM_DEBUG
6187 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6188 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6189 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6190 #endif
6191 
6192 	error = iwm_read_firmware(sc);
6193 	if (error) {
6194 		goto fail;
6195 	} else if (sc->sc_fw.fw_fp == NULL) {
6196 		/*
6197 		 * XXX Add a solution for properly deferring firmware load
6198 		 *     during bootup.
6199 		 */
6200 		goto fail;
6201 	} else {
6202 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6203 		sc->sc_preinit_hook.ich_arg = sc;
6204 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6205 			device_printf(dev,
6206 			    "config_intrhook_establish failed\n");
6207 			goto fail;
6208 		}
6209 	}
6210 
6211 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6212 	    "<-%s\n", __func__);
6213 
6214 	return 0;
6215 
6216 	/* Free allocated memory if something failed during attachment. */
6217 fail:
6218 	iwm_detach_local(sc, 0);
6219 
6220 	return ENXIO;
6221 }
6222 
6223 static int
6224 iwm_is_valid_ether_addr(uint8_t *addr)
6225 {
6226 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6227 
6228 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6229 		return (FALSE);
6230 
6231 	return (TRUE);
6232 }
6233 
6234 static int
6235 iwm_wme_update(struct ieee80211com *ic)
6236 {
6237 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
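/* e.g. an ECWmin of 4 yields IWM_EXP2(4) == 15, i.e. CWmin = 15 slots. */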
6238 	struct iwm_softc *sc = ic->ic_softc;
6239 	struct chanAccParams chp;
6240 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6241 	struct iwm_vap *ivp = IWM_VAP(vap);
6242 	struct iwm_node *in;
6243 	struct wmeParams tmp[WME_NUM_AC];
6244 	int aci, error;
6245 
6246 	if (vap == NULL)
6247 		return (0);
6248 
6249 	ieee80211_wme_ic_getparams(ic, &chp);
6250 
6251 	IEEE80211_LOCK(ic);
6252 	for (aci = 0; aci < WME_NUM_AC; aci++)
6253 		tmp[aci] = chp.cap_wmeParams[aci];
6254 	IEEE80211_UNLOCK(ic);
6255 
6256 	IWM_LOCK(sc);
6257 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6258 		const struct wmeParams *ac = &tmp[aci];
6259 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6260 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6261 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6262 		ivp->queue_params[aci].edca_txop =
6263 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6264 	}
6265 	ivp->have_wme = TRUE;
6266 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6267 		in = IWM_NODE(vap->iv_bss);
6268 		if (in->in_assoc) {
6269 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6270 				device_printf(sc->sc_dev,
6271 				    "%s: failed to update MAC\n", __func__);
6272 			}
6273 		}
6274 	}
6275 	IWM_UNLOCK(sc);
6276 
6277 	return (0);
6278 #undef IWM_EXP2
6279 }
6280 
6281 static void
6282 iwm_preinit(void *arg)
6283 {
6284 	struct iwm_softc *sc = arg;
6285 	device_t dev = sc->sc_dev;
6286 	struct ieee80211com *ic = &sc->sc_ic;
6287 	int error;
6288 
6289 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6290 	    "->%s\n", __func__);
6291 
6292 	IWM_LOCK(sc);
6293 	if ((error = iwm_start_hw(sc)) != 0) {
6294 		device_printf(dev, "could not initialize hardware\n");
6295 		IWM_UNLOCK(sc);
6296 		goto fail;
6297 	}
6298 
6299 	error = iwm_run_init_ucode(sc, 1);
6300 	iwm_stop_device(sc);
6301 	if (error) {
6302 		IWM_UNLOCK(sc);
6303 		goto fail;
6304 	}
6305 	device_printf(dev,
6306 	    "hw rev 0x%x, fw ver %s, address %s\n",
6307 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6308 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6309 
6310 	/* not all hardware can do 5GHz band */
6311 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6312 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6313 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6314 	IWM_UNLOCK(sc);
6315 
6316 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6317 	    ic->ic_channels);
6318 
6319 	/*
6320 	 * At this point we've committed - if we fail to do setup,
6321 	 * we now also have to tear down the net80211 state.
6322 	 */
6323 	ieee80211_ifattach(ic);
6324 	ic->ic_vap_create = iwm_vap_create;
6325 	ic->ic_vap_delete = iwm_vap_delete;
6326 	ic->ic_raw_xmit = iwm_raw_xmit;
6327 	ic->ic_node_alloc = iwm_node_alloc;
6328 	ic->ic_scan_start = iwm_scan_start;
6329 	ic->ic_scan_end = iwm_scan_end;
6330 	ic->ic_update_mcast = iwm_update_mcast;
6331 	ic->ic_getradiocaps = iwm_init_channel_map;
6332 	ic->ic_set_channel = iwm_set_channel;
6333 	ic->ic_scan_curchan = iwm_scan_curchan;
6334 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6335 	ic->ic_wme.wme_update = iwm_wme_update;
6336 	ic->ic_parent = iwm_parent;
6337 	ic->ic_transmit = iwm_transmit;
6338 	iwm_radiotap_attach(sc);
6339 	if (bootverbose)
6340 		ieee80211_announce(ic);
6341 
6342 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6343 	    "<-%s\n", __func__);
6344 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6345 
6346 	return;
6347 fail:
6348 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6349 	iwm_detach_local(sc, 0);
6350 }
6351 
6352 /*
6353  * Attach the interface to 802.11 radiotap.
6354  */
6355 static void
6356 iwm_radiotap_attach(struct iwm_softc *sc)
6357 {
6358 	struct ieee80211com *ic = &sc->sc_ic;
6359 
6360 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6361 	    "->%s begin\n", __func__);
6362 	ieee80211_radiotap_attach(ic,
6363 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6364 	    IWM_TX_RADIOTAP_PRESENT,
6365 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6366 	    IWM_RX_RADIOTAP_PRESENT);
6367 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6368 	    "->%s end\n", __func__);
6369 }
6370 
6371 static struct ieee80211vap *
6372 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6373     enum ieee80211_opmode opmode, int flags,
6374     const uint8_t bssid[IEEE80211_ADDR_LEN],
6375     const uint8_t mac[IEEE80211_ADDR_LEN])
6376 {
6377 	struct iwm_vap *ivp;
6378 	struct ieee80211vap *vap;
6379 
6380 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6381 		return NULL;
6382 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6383 	vap = &ivp->iv_vap;
6384 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6385 	vap->iv_bmissthreshold = 10;            /* override default */
6386 	/* Override with driver methods. */
6387 	ivp->iv_newstate = vap->iv_newstate;
6388 	vap->iv_newstate = iwm_newstate;
6389 
6390 	ivp->id = IWM_DEFAULT_MACID;
6391 	ivp->color = IWM_DEFAULT_COLOR;
6392 
6393 	ivp->have_wme = FALSE;
6394 	ivp->ps_disabled = FALSE;
6395 
6396 	ieee80211_ratectl_init(vap);
6397 	/* Complete setup. */
6398 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6399 	    mac);
6400 	ic->ic_opmode = opmode;
6401 
6402 	return vap;
6403 }
6404 
6405 static void
6406 iwm_vap_delete(struct ieee80211vap *vap)
6407 {
6408 	struct iwm_vap *ivp = IWM_VAP(vap);
6409 
6410 	ieee80211_ratectl_deinit(vap);
6411 	ieee80211_vap_detach(vap);
6412 	free(ivp, M_80211_VAP);
6413 }
6414 
6415 static void
6416 iwm_xmit_queue_drain(struct iwm_softc *sc)
6417 {
6418 	struct mbuf *m;
6419 	struct ieee80211_node *ni;
6420 
6421 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6422 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6423 		ieee80211_free_node(ni);
6424 		m_freem(m);
6425 	}
6426 }
6427 
6428 static void
6429 iwm_scan_start(struct ieee80211com *ic)
6430 {
6431 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6432 	struct iwm_softc *sc = ic->ic_softc;
6433 	int error;
6434 
6435 	IWM_LOCK(sc);
6436 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6437 		/* This should not be possible */
6438 		device_printf(sc->sc_dev,
6439 		    "%s: Previous scan not completed yet\n", __func__);
6440 	}
6441 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6442 		error = iwm_umac_scan(sc);
6443 	else
6444 		error = iwm_lmac_scan(sc);
6445 	if (error != 0) {
6446 		device_printf(sc->sc_dev, "could not initiate scan\n");
6447 		IWM_UNLOCK(sc);
6448 		ieee80211_cancel_scan(vap);
6449 	} else {
6450 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6451 		iwm_led_blink_start(sc);
6452 		IWM_UNLOCK(sc);
6453 	}
6454 }
6455 
6456 static void
6457 iwm_scan_end(struct ieee80211com *ic)
6458 {
6459 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6460 	struct iwm_softc *sc = ic->ic_softc;
6461 
6462 	IWM_LOCK(sc);
6463 	iwm_led_blink_stop(sc);
6464 	if (vap->iv_state == IEEE80211_S_RUN)
6465 		iwm_led_enable(sc);
6466 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6467 		/*
6468 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6469 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6470 		 * taskqueue.
6471 		 */
6472 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6473 		iwm_scan_stop_wait(sc);
6474 	}
6475 	IWM_UNLOCK(sc);
6476 
6477 	/*
6478 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6479 	 * This is to make sure that it won't call ieee80211_scan_done
6480 	 * when we have already started the next scan.
6481 	 */
6482 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6483 }
6484 
6485 static void
6486 iwm_update_mcast(struct ieee80211com *ic)
6487 {
6488 }
6489 
6490 static void
6491 iwm_set_channel(struct ieee80211com *ic)
6492 {
6493 }
6494 
6495 static void
6496 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6497 {
6498 }
6499 
6500 static void
6501 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6502 {
6503 }
6504 
6505 void
6506 iwm_init_task(void *arg1)
6507 {
6508 	struct iwm_softc *sc = arg1;
6509 
6510 	IWM_LOCK(sc);
6511 	while (sc->sc_flags & IWM_FLAG_BUSY)
6512 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6513 	sc->sc_flags |= IWM_FLAG_BUSY;
6514 	iwm_stop(sc);
6515 	if (sc->sc_ic.ic_nrunning > 0)
6516 		iwm_init(sc);
6517 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6518 	wakeup(&sc->sc_flags);
6519 	IWM_UNLOCK(sc);
6520 }
6521 
6522 static int
6523 iwm_resume(device_t dev)
6524 {
6525 	struct iwm_softc *sc = device_get_softc(dev);
6526 	int do_reinit = 0;
6527 
6528 	/*
6529 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6530 	 * PCI Tx retries from interfering with C3 CPU state.
6531 	 */
6532 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6533 
6534 	if (!sc->sc_attached)
6535 		return 0;
6536 
6537 	iwm_init_task(sc);
6538 
6539 	IWM_LOCK(sc);
6540 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6541 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6542 		do_reinit = 1;
6543 	}
6544 	IWM_UNLOCK(sc);
6545 
6546 	if (do_reinit)
6547 		ieee80211_resume_all(&sc->sc_ic);
6548 
6549 	return 0;
6550 }
6551 
6552 static int
6553 iwm_suspend(device_t dev)
6554 {
6555 	int do_stop = 0;
6556 	struct iwm_softc *sc = device_get_softc(dev);
6557 
6558 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6559 
6560 	if (!sc->sc_attached)
6561 		return (0);
6562 
6563 	ieee80211_suspend_all(&sc->sc_ic);
6564 
6565 	if (do_stop) {
6566 		IWM_LOCK(sc);
6567 		iwm_stop(sc);
6568 		sc->sc_flags |= IWM_FLAG_SCANNING;
6569 		IWM_UNLOCK(sc);
6570 	}
6571 
6572 	return (0);
6573 }
6574 
6575 static int
6576 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6577 {
6578 	struct iwm_fw_info *fw = &sc->sc_fw;
6579 	device_t dev = sc->sc_dev;
6580 	int i;
6581 
6582 	if (!sc->sc_attached)
6583 		return 0;
6584 	sc->sc_attached = 0;
6585 	if (do_net80211) {
6586 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6587 	}
6588 	iwm_stop_device(sc);
6589 	if (do_net80211) {
6590 		IWM_LOCK(sc);
6591 		iwm_xmit_queue_drain(sc);
6592 		IWM_UNLOCK(sc);
6593 		ieee80211_ifdetach(&sc->sc_ic);
6594 	}
6595 	callout_drain(&sc->sc_led_blink_to);
6596 	callout_drain(&sc->sc_watchdog_to);
6597 
6598 	iwm_phy_db_free(sc->sc_phy_db);
6599 	sc->sc_phy_db = NULL;
6600 
6601 	iwm_free_nvm_data(sc->nvm_data);
6602 
6603 	/* Free descriptor rings */
6604 	iwm_free_rx_ring(sc, &sc->rxq);
6605 	for (i = 0; i < nitems(sc->txq); i++)
6606 		iwm_free_tx_ring(sc, &sc->txq[i]);
6607 
6608 	/* Free firmware */
6609 	if (fw->fw_fp != NULL)
6610 		iwm_fw_info_free(fw);
6611 
6612 	/* Free scheduler */
6613 	iwm_dma_contig_free(&sc->sched_dma);
6614 	iwm_dma_contig_free(&sc->ict_dma);
6615 	iwm_dma_contig_free(&sc->kw_dma);
6616 	iwm_dma_contig_free(&sc->fw_dma);
6617 
6618 	iwm_free_fw_paging(sc);
6619 
6620 	/* Finished with the hardware - detach things */
6621 	iwm_pci_detach(dev);
6622 
6623 	if (sc->sc_notif_wait != NULL) {
6624 		iwm_notification_wait_free(sc->sc_notif_wait);
6625 		sc->sc_notif_wait = NULL;
6626 	}
6627 
6628 	IWM_LOCK_DESTROY(sc);
6629 
6630 	return (0);
6631 }
6632 
6633 static int
6634 iwm_detach(device_t dev)
6635 {
6636 	struct iwm_softc *sc = device_get_softc(dev);
6637 
6638 	return (iwm_detach_local(sc, 1));
6639 }
6640 
6641 static device_method_t iwm_pci_methods[] = {
6642 	/* Device interface */
6643 	DEVMETHOD(device_probe,		iwm_probe),
6644 	DEVMETHOD(device_attach,	iwm_attach),
6645 	DEVMETHOD(device_detach,	iwm_detach),
6646 	DEVMETHOD(device_suspend,	iwm_suspend),
6647 	DEVMETHOD(device_resume,	iwm_resume),
6648 
6649 	DEVMETHOD_END
6650 };
6651 
6652 static driver_t iwm_pci_driver = {
6653         "iwm",
6654         iwm_pci_methods,
6655         sizeof (struct iwm_softc)
6656 };
6657 
6658 static devclass_t iwm_devclass;
6659 
6660 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6661 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6662     iwm_devices, nitems(iwm_devices));
6663 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6664 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6665 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6666