/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
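
/*
 * Illustrative use of mtodoff() (the variable names here are
 * hypothetical): fetch a structure that lives at a known byte offset
 * inside an mbuf without casting m->m_data by hand:
 *
 *	struct iwm_rx_packet *pkt =
 *	    mtodoff(m, struct iwm_rx_packet *, offset);
 *
 * Like mtod(9), this is pure pointer arithmetic and performs no bounds
 * checking against the mbuf's length.
 */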

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
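
/*
 * Note: the 'rate' field above is in 500 kb/s units, following the
 * net80211 rate-set convention, so { 2, IWM_RATE_1M_PLCP } is 1 Mb/s
 * and { 108, IWM_RATE_54M_PLCP } is 54 Mb/s.  The first four entries
 * are the CCK rates and the rest are OFDM, which is exactly what the
 * IWM_RIDX_IS_CCK()/IWM_RIDX_IS_OFDM() tests encode.
 */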

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
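
/*
 * The tunable registered above can be set from loader.conf(5) before
 * the driver attaches; e.g. (hypothetical value):
 *
 *	hw.iwm.lar.disable="1"
 *
 * disables location-aware regulatory (LAR) support.
 */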

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}
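
/*
 * Wire layout of each firmware section payload as consumed above: a
 * 32-bit little-endian device load address immediately followed by the
 * image bytes.  Conceptually (hypothetical struct, for illustration
 * only):
 *
 *	struct fw_sec {
 *		uint32_t offset;	(device address to load at)
 *		uint8_t  data[];	(dlen - 4 bytes of image)
 *	};
 */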

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %u larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %u larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}
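
/*
 * Both TLV parsers above flatten an (api_index, bit) pair into a single
 * wide bitmap: bit i of the 32-bit word at api_index lands at flat
 * index i + 32 * api_index.  For example, api_index 1 with bit 3 set
 * marks API/capability number 35.
 */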

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev, "could not read firmware %s\n",
		    sc->cfg->fw_name);
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size is not a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}
/* TX scheduler rings: per-queue byte count tables used by the scheduler */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
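
/*
 * The ICT (interrupt cause table) is a shared-memory ring the device
 * writes interrupt cause words into, so the driver can read causes
 * from host memory instead of doing a register read per interrupt;
 * iwm_ict_reset() programs its physical address and the interrupt
 * handler walks it while IWM_FLAG_USE_ICT is set.
 */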

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
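
/*
 * Note the descriptor split above: multi-queue capable devices
 * (mqrx_supported) use 64-bit free-descriptor entries plus a separate
 * "used" ring, while legacy devices use a single ring of 32-bit
 * descriptors.  Either way each entry holds only the DMA address of
 * one IWM_RBUF_SIZE receive buffer.
 */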

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
	 * to allocate command space for the other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A: NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not be able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}
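
/*
 * A note on the "enabled" word written to IWM_RFH_RXF_RXQ_ACTIVE above:
 * following the Linux iwlwifi code this is derived from, it packs one
 * set of per-queue enable bits in the low half of the register and a
 * second set in the high half, so ((1 << 0) << 16) | (1 << 0) activates
 * only RX queue 0.
 */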

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1686 
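/*
 * Runs after the firmware's ALIVE notification: resets the ICT table,
 * clears the scheduler's context/translation SRAM, points the
 * scheduler at the DRAM ring array, enables the command queue, and
 * turns on all FH TX DMA channels.
 */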
1687 static int
1688 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1689 {
1690 	int error, chnl;
1691 
1692 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1693 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1694 
1695 	if (!iwm_nic_lock(sc))
1696 		return EBUSY;
1697 
1698 	iwm_ict_reset(sc);
1699 
1700 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1701 	if (scd_base_addr != 0 &&
1702 	    scd_base_addr != sc->scd_base_addr) {
1703 		device_printf(sc->sc_dev,
1704 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1705 		    __func__, scd_base_addr, sc->scd_base_addr);
1706 	}
1707 
1708 	iwm_nic_unlock(sc);
1709 
1710 	/* reset context data, TX status and translation data */
1711 	error = iwm_write_mem(sc,
1712 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1713 	    NULL, clear_dwords);
1714 	if (error)
1715 		return EBUSY;
1716 
1717 	if (!iwm_nic_lock(sc))
1718 		return EBUSY;
1719 
1720 	/* Set physical address of TX scheduler rings (1KB aligned). */
1721 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1722 
1723 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1724 
1725 	iwm_nic_unlock(sc);
1726 
1727 	/* enable command channel */
1728 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1729 	if (error)
1730 		return error;
1731 
1732 	if (!iwm_nic_lock(sc))
1733 		return EBUSY;
1734 
1735 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1736 
1737 	/* Enable DMA channels. */
1738 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1739 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1740 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1741 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1742 	}
1743 
1744 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1745 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1746 
1747 	iwm_nic_unlock(sc);
1748 
1749 	/* Enable L1-Active */
1750 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1751 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1752 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1753 	}
1754 
1755 	return error;
1756 }
1757 
1758 /*
1759  * NVM read access and content parsing.  We do not support
1760  * external NVM or writing NVM.
1761  * iwlwifi/mvm/nvm.c
1762  */
1763 
1764 /* Default NVM size to read */
1765 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1766 
1767 #define IWM_NVM_WRITE_OPCODE 1
1768 #define IWM_NVM_READ_OPCODE 0
1769 
1770 /* load nvm chunk response */
1771 enum {
1772 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1773 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1774 };
1775 
1776 static int
1777 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1778 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1779 {
1780 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1781 		.offset = htole16(offset),
1782 		.length = htole16(length),
1783 		.type = htole16(section),
1784 		.op_code = IWM_NVM_READ_OPCODE,
1785 	};
1786 	struct iwm_nvm_access_resp *nvm_resp;
1787 	struct iwm_rx_packet *pkt;
1788 	struct iwm_host_cmd cmd = {
1789 		.id = IWM_NVM_ACCESS_CMD,
1790 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1791 		.data = { &nvm_access_cmd, },
1792 	};
1793 	int ret, bytes_read, offset_read;
1794 	uint8_t *resp_data;
1795 
1796 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1797 
1798 	ret = iwm_send_cmd(sc, &cmd);
1799 	if (ret) {
1800 		device_printf(sc->sc_dev,
1801 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1802 		return ret;
1803 	}
1804 
1805 	pkt = cmd.resp_pkt;
1806 
1807 	/* Extract NVM response */
1808 	nvm_resp = (void *)pkt->data;
1809 	ret = le16toh(nvm_resp->status);
1810 	bytes_read = le16toh(nvm_resp->length);
1811 	offset_read = le16toh(nvm_resp->offset);
1812 	resp_data = nvm_resp->data;
1813 	if (ret) {
1814 		if ((offset != 0) &&
1815 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1816 			/*
1817 			 * NOT_VALID_ADDRESS means the driver tried to read
1818 			 * a chunk from an address that is a multiple of 2K
1819 			 * and got an error because that address is empty.
1820 			 * (offset != 0) means valid data has already been
1821 			 * read from another chunk, so this case is not an
1822 			 * error.
1823 			 */
1824 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1826 				    offset);
1827 			*len = 0;
1828 			ret = 0;
1829 		} else {
1830 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 				    "NVM access command failed with status %d\n", ret);
1832 			ret = EIO;
1833 		}
1834 		goto exit;
1835 	}
1836 
1837 	if (offset_read != offset) {
1838 		device_printf(sc->sc_dev,
1839 		    "NVM ACCESS response with invalid offset %d\n",
1840 		    offset_read);
1841 		ret = EINVAL;
1842 		goto exit;
1843 	}
1844 
1845 	if (bytes_read > length) {
1846 		device_printf(sc->sc_dev,
1847 		    "NVM ACCESS response with too much data "
1848 		    "(%d bytes requested, %d bytes received)\n",
1849 		    length, bytes_read);
1850 		ret = EINVAL;
1851 		goto exit;
1852 	}
1853 
1854 	/* Copy the NVM data into the caller's buffer. */
1855 	memcpy(data + offset, resp_data, bytes_read);
1856 	*len = bytes_read;
1857 
1858  exit:
1859 	iwm_free_resp(sc, &cmd);
1860 	return ret;
1861 }
1862 
1863 /*
1864  * Reads an NVM section completely.
1865  * NICs prior to the 7000 family don't have a real NVM, but just read
1866  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1867  * by the uCode, in that case we need to check manually that we don't
1868  * overflow and try to read more than the EEPROM size.
1869  * For 7000 family NICs, we supply the maximal size we can read, and
1870  * the uCode fills the response with as much data as it can without
1871  * overflowing, so no check is needed.
1872  */
1873 static int
1874 iwm_nvm_read_section(struct iwm_softc *sc,
1875 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1876 {
1877 	uint16_t seglen, length, offset = 0;
1878 	int ret;
1879 
1880 	/* Set nvm section read length */
1881 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1882 
1883 	seglen = length;
1884 
1885 	/* Read the NVM until exhausted (reading less than requested) */
1886 	while (seglen == length) {
1887 		/* Make sure the read cannot overflow the EEPROM-sized buffer. */
1888 		if ((size_read + offset + length) >
1889 		    sc->cfg->eeprom_size) {
1890 			device_printf(sc->sc_dev,
1891 			    "EEPROM size is too small for NVM\n");
1892 			return ENOBUFS;
1893 		}
1894 
1895 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1896 		if (ret) {
1897 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1898 				    "Cannot read NVM from section %d offset %d, length %d\n",
1899 				    section, offset, length);
1900 			return ret;
1901 		}
1902 		offset += seglen;
1903 	}
1904 
1905 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1906 		    "NVM section %d read completed\n", section);
1907 	*len = offset;
1908 	return 0;
1909 }
1910 
1911 /*
1912  * BEGIN IWM_NVM_PARSE
1913  */
1914 
1915 /* iwlwifi/iwl-nvm-parse.c */
1916 
1917 /*
1918  * Translate EEPROM flags to net80211.
1919  */
1920 static uint32_t
1921 iwm_eeprom_channel_flags(uint16_t ch_flags)
1922 {
1923 	uint32_t nflags;
1924 
1925 	nflags = 0;
1926 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1927 		nflags |= IEEE80211_CHAN_PASSIVE;
1928 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1929 		nflags |= IEEE80211_CHAN_NOADHOC;
1930 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1931 		nflags |= IEEE80211_CHAN_DFS;
1932 		/* Just in case. */
1933 		nflags |= IEEE80211_CHAN_NOADHOC;
1934 	}
1935 
1936 	return (nflags);
1937 }
1938 
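/*
 * Walk the NVM channel flags for indices [ch_idx, ch_num) and add
 * every channel marked valid to the net80211 channel list.
 */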
1939 static void
1940 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1941     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1942     const uint8_t bands[])
1943 {
1944 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1945 	uint32_t nflags;
1946 	uint16_t ch_flags;
1947 	uint8_t ieee;
1948 	int error;
1949 
1950 	for (; ch_idx < ch_num; ch_idx++) {
1951 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1952 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1953 			ieee = iwm_nvm_channels[ch_idx];
1954 		else
1955 			ieee = iwm_nvm_channels_8000[ch_idx];
1956 
1957 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1958 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1959 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1960 			    ieee, ch_flags,
1961 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1962 			    "5.2" : "2.4");
1963 			continue;
1964 		}
1965 
1966 		nflags = iwm_eeprom_channel_flags(ch_flags);
1967 		error = ieee80211_add_channel(chans, maxchans, nchans,
1968 		    ieee, 0, 0, nflags, bands);
1969 		if (error != 0)
1970 			break;
1971 
1972 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1973 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1974 		    ieee, ch_flags,
1975 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1976 		    "5.2" : "2.4");
1977 	}
1978 }
1979 
1980 static void
1981 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1982     struct ieee80211_channel chans[])
1983 {
1984 	struct iwm_softc *sc = ic->ic_softc;
1985 	struct iwm_nvm_data *data = sc->nvm_data;
1986 	uint8_t bands[IEEE80211_MODE_BYTES];
1987 	size_t ch_num;
1988 
1989 	memset(bands, 0, sizeof(bands));
1990 	/* 1-13: 11b/g channels. */
1991 	setbit(bands, IEEE80211_MODE_11B);
1992 	setbit(bands, IEEE80211_MODE_11G);
1993 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1994 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
1995 
1996 	/* 14: 11b channel only. */
1997 	clrbit(bands, IEEE80211_MODE_11G);
1998 	iwm_add_channel_band(sc, chans, maxchans, nchans,
1999 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2000 
2001 	if (data->sku_cap_band_52GHz_enable) {
2002 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2003 			ch_num = nitems(iwm_nvm_channels);
2004 		else
2005 			ch_num = nitems(iwm_nvm_channels_8000);
2006 		memset(bands, 0, sizeof(bands));
2007 		setbit(bands, IEEE80211_MODE_11A);
2008 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2009 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2010 	}
2011 }
2012 
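/*
 * Derive the MAC address on family 8000 devices: prefer the MAC
 * override (MAO) NVM section and fall back to the OTP address in the
 * WFMP periphery registers when the override is absent or invalid.
 */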
2013 static void
2014 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2015 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2016 {
2017 	const uint8_t *hw_addr;
2018 
2019 	if (mac_override) {
2020 		static const uint8_t reserved_mac[] = {
2021 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2022 		};
2023 
2024 		hw_addr = (const uint8_t *)(mac_override +
2025 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2026 
2027 		/*
2028 		 * Store the MAC address from the MAO section.
2029 		 * No byte swapping is required in the MAO section.
2030 		 */
2031 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2032 
2033 		/*
2034 		 * Force the use of the OTP MAC address in case of reserved MAC
2035 		 * address in the NVM, or if address is given but invalid.
2036 		 */
2037 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2038 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2039 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2040 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2041 			return;
2042 
2043 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2044 		    "%s: mac address from nvm override section invalid\n",
2045 		    __func__);
2046 	}
2047 
2048 	if (nvm_hw) {
2049 		/* read the mac address from WFMP registers */
2050 		uint32_t mac_addr0 =
2051 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2052 		uint32_t mac_addr1 =
2053 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2054 
2055 		hw_addr = (const uint8_t *)&mac_addr0;
2056 		data->hw_addr[0] = hw_addr[3];
2057 		data->hw_addr[1] = hw_addr[2];
2058 		data->hw_addr[2] = hw_addr[1];
2059 		data->hw_addr[3] = hw_addr[0];
2060 
2061 		hw_addr = (const uint8_t *)&mac_addr1;
2062 		data->hw_addr[4] = hw_addr[1];
2063 		data->hw_addr[5] = hw_addr[0];
2064 
2065 		return;
2066 	}
2067 
2068 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2069 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2070 }
2071 
2072 static int
2073 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2074 	    const uint16_t *phy_sku)
2075 {
2076 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2077 		return le16_to_cpup(nvm_sw + IWM_SKU);
2078 
2079 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2080 }
2081 
2082 static int
2083 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2084 {
2085 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2086 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2087 	else
2088 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2089 						IWM_NVM_VERSION_8000));
2090 }
2091 
2092 static int
2093 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2094 		  const uint16_t *phy_sku)
2095 {
2096 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2097 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2098 
2099 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2100 }
2101 
2102 static int
2103 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2104 {
2105 	int n_hw_addr;
2106 
2107 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2108 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2109 
2110 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2111 
2112 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2113 }
2114 
2115 static void
2116 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2117 		  uint32_t radio_cfg)
2118 {
2119 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2120 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2121 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2122 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2123 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2124 		return;
2125 	}
2126 
2127 	/* set the radio configuration for family 8000 */
2128 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2129 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2130 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2131 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2132 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2133 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2134 }
2135 
2136 static int
2137 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2138 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2139 {
2140 #ifdef notyet /* for FAMILY 9000 */
2141 	if (cfg->mac_addr_from_csr) {
2142 		iwm_set_hw_address_from_csr(sc, data);
2143 	} else
2144 #endif
2145 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2146 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2147 
2148 		/* The byte order is little endian 16 bit, meaning 214365 */
2149 		data->hw_addr[0] = hw_addr[1];
2150 		data->hw_addr[1] = hw_addr[0];
2151 		data->hw_addr[2] = hw_addr[3];
2152 		data->hw_addr[3] = hw_addr[2];
2153 		data->hw_addr[4] = hw_addr[5];
2154 		data->hw_addr[5] = hw_addr[4];
2155 	} else {
2156 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2157 	}
2158 
2159 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2160 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2161 		return EINVAL;
2162 	}
2163 
2164 	return 0;
2165 }
2166 
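/*
 * Assemble an iwm_nvm_data structure from the raw NVM sections:
 * radio configuration, SKU capabilities, number of reserved MAC
 * addresses, LAR state (family 8000+), the hardware address, and the
 * per-channel regulatory flags.
 */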
2167 static struct iwm_nvm_data *
2168 iwm_parse_nvm_data(struct iwm_softc *sc,
2169 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2170 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2171 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2172 {
2173 	struct iwm_nvm_data *data;
2174 	uint32_t sku, radio_cfg;
2175 	uint16_t lar_config;
2176 
2177 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2178 		data = malloc(sizeof(*data) +
2179 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2180 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2181 	} else {
2182 		data = malloc(sizeof(*data) +
2183 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2184 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2185 	}
2186 	if (!data)
2187 		return NULL;
2188 
2189 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2190 
2191 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2192 	iwm_set_radio_cfg(sc, data, radio_cfg);
2193 
2194 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2195 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2196 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2197 	data->sku_cap_11n_enable = 0;
2198 
2199 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2200 
2201 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2202 		/* TODO: use IWL_NVM_EXT */
2203 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2204 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2205 				       IWM_NVM_LAR_OFFSET_8000;
2206 
2207 		lar_config = le16_to_cpup(regulatory + lar_offset);
2208 		data->lar_enabled = !!(lar_config &
2209 				       IWM_NVM_LAR_ENABLED_8000);
2210 	}
2211 
2212 	/* If no valid mac address was found - bail out */
2213 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2214 		free(data, M_DEVBUF);
2215 		return NULL;
2216 	}
2217 
2218 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2219 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2220 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2221 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2222 	} else {
2223 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2224 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2225 	}
2226 
2227 	return data;
2228 }
2229 
2230 static void
2231 iwm_free_nvm_data(struct iwm_nvm_data *data)
2232 {
2233 	if (data != NULL)
2234 		free(data, M_DEVBUF);
2235 }
2236 
2237 static struct iwm_nvm_data *
2238 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2239 {
2240 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2241 
2242 	/* Checking for required sections */
2243 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2244 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2245 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2246 			device_printf(sc->sc_dev,
2247 			    "Can't parse empty OTP/NVM sections\n");
2248 			return NULL;
2249 		}
2250 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2251 		/* SW and REGULATORY sections are mandatory */
2252 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2253 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2254 			device_printf(sc->sc_dev,
2255 			    "Can't parse empty OTP/NVM sections\n");
2256 			return NULL;
2257 		}
2258 		/* MAC_OVERRIDE or at least HW section must exist */
2259 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2260 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2261 			device_printf(sc->sc_dev,
2262 			    "Can't parse mac_address, empty sections\n");
2263 			return NULL;
2264 		}
2265 
2266 		/* PHY_SKU section is mandatory in B0 */
2267 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2268 			device_printf(sc->sc_dev,
2269 			    "Can't parse phy_sku in B0, empty sections\n");
2270 			return NULL;
2271 		}
2272 	} else {
2273 		panic("unknown device family %d", sc->cfg->device_family);
2274 	}
2275 
2276 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2277 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2278 	calib = (const uint16_t *)
2279 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2280 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2281 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2282 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2283 	mac_override = (const uint16_t *)
2284 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2285 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2286 
2287 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2288 	    phy_sku, regulatory);
2289 }
2290 
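/*
 * Read every NVM section from the device into a scratch buffer, keep
 * a private copy of each section that was actually present, and parse
 * the copies into sc->nvm_data.
 */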
2291 static int
2292 iwm_nvm_init(struct iwm_softc *sc)
2293 {
2294 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2295 	int i, ret, section;
2296 	uint32_t size_read = 0;
2297 	uint8_t *nvm_buffer, *temp;
2298 	uint16_t len;
2299 
2300 	memset(nvm_sections, 0, sizeof(nvm_sections));
2301 
2302 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
2303 		return EINVAL;
2304 
2305 	/* Load NVM values from the NIC. */
2307 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2308 
2309 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2310 	if (!nvm_buffer)
2311 		return ENOMEM;
2312 	for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
2313 		/* Read the section into the shared scratch buffer. */
2314 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2315 					   &len, size_read);
2316 		if (ret)
2317 			continue;
2318 		size_read += len;
2319 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2320 		if (!temp) {
2321 			ret = ENOMEM;
2322 			break;
2323 		}
2324 		memcpy(temp, nvm_buffer, len);
2325 
2326 		nvm_sections[section].data = temp;
2327 		nvm_sections[section].length = len;
2328 	}
2329 	if (!size_read)
2330 		device_printf(sc->sc_dev, "OTP is blank\n");
2331 	free(nvm_buffer, M_DEVBUF);
2332 
2333 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2334 	if (!sc->nvm_data)
2335 		return EINVAL;
2336 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2337 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2338 
2339 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2340 		if (nvm_sections[i].data != NULL)
2341 			free(nvm_sections[i].data, M_DEVBUF);
2342 	}
2343 
2344 	return 0;
2345 }
2346 
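/*
 * Copy one firmware section to the device, one DMA-able chunk at a
 * time, toggling the LMPM extended address space around chunks whose
 * destination lies in the extended SRAM region.
 */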
2347 static int
2348 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2349 	const struct iwm_fw_desc *section)
2350 {
2351 	struct iwm_dma_info *dma = &sc->fw_dma;
2352 	uint8_t *v_addr;
2353 	bus_addr_t p_addr;
2354 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2355 	int ret = 0;
2356 
2357 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2358 		    "%s: [%d] uCode section being loaded...\n",
2359 		    __func__, section_num);
2360 
2361 	v_addr = dma->vaddr;
2362 	p_addr = dma->paddr;
2363 
2364 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2365 		uint32_t copy_size, dst_addr;
2366 		int extended_addr = FALSE;
2367 
2368 		copy_size = MIN(chunk_sz, section->len - offset);
2369 		dst_addr = section->offset + offset;
2370 
2371 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2372 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2373 			extended_addr = TRUE;
2374 
2375 		if (extended_addr)
2376 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2377 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2378 
2379 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2380 		    copy_size);
2381 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2382 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2383 						   copy_size);
2384 
2385 		if (extended_addr)
2386 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2387 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2388 
2389 		if (ret) {
2390 			device_printf(sc->sc_dev,
2391 			    "%s: Could not load the [%d] uCode section\n",
2392 			    __func__, section_num);
2393 			break;
2394 		}
2395 	}
2396 
2397 	return ret;
2398 }
2399 
2400 /*
2401  * ucode
2402  */
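/*
 * Push a single chunk to the device over the FH service channel and
 * sleep until the FH_TX interrupt reports that the DMA transfer has
 * completed (or a five-second timeout expires).
 */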
2403 static int
2404 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2405 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2406 {
2407 	sc->sc_fw_chunk_done = 0;
2408 
2409 	if (!iwm_nic_lock(sc))
2410 		return EBUSY;
2411 
2412 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2413 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2414 
2415 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2416 	    dst_addr);
2417 
2418 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2419 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2420 
2421 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2422 	    (iwm_get_dma_hi_addr(phy_addr)
2423 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2424 
2425 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2426 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2427 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2428 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2429 
2430 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2431 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2432 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2433 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2434 
2435 	iwm_nic_unlock(sc);
2436 
2437 	/* wait up to 5s for this segment to load */
2438 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2439 
2440 	if (!sc->sc_fw_chunk_done) {
2441 		device_printf(sc->sc_dev,
2442 		    "fw chunk addr 0x%x len %d failed to load\n",
2443 		    dst_addr, byte_cnt);
2444 		return ETIMEDOUT;
2445 	}
2446 
2447 	return 0;
2448 }
2449 
2450 static int
2451 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2452 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2453 {
2454 	int shift_param;
2455 	int i, ret = 0, sec_num = 0x1;
2456 	uint32_t val, last_read_idx = 0;
2457 
2458 	if (cpu == 1) {
2459 		shift_param = 0;
2460 		*first_ucode_section = 0;
2461 	} else {
2462 		shift_param = 16;
2463 		(*first_ucode_section)++;
2464 	}
2465 
2466 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2467 		last_read_idx = i;
2468 
2469 		/*
2470 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2471 		 * CPU1 sections from the CPU2 sections.
2472 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2473 		 * non-paged CPU2 sections from the CPU2 paging sections.
2474 		 */
2475 		if (!image->sec[i].data ||
2476 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2477 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2478 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2479 				    "Break since data is not valid or section is empty, sec = %d\n",
2480 				    i);
2481 			break;
2482 		}
2483 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2484 		if (ret)
2485 			return ret;
2486 
2487 		/* Notify the ucode of the loaded section number and status */
2488 		if (iwm_nic_lock(sc)) {
2489 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2490 			val = val | (sec_num << shift_param);
2491 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2492 			sec_num = (sec_num << 1) | 0x1;
2493 			iwm_nic_unlock(sc);
2494 		}
2495 	}
2496 
2497 	*first_ucode_section = last_read_idx;
2498 
2499 	iwm_enable_interrupts(sc);
2500 
2501 	if (iwm_nic_lock(sc)) {
2502 		if (cpu == 1)
2503 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2504 		else
2505 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2506 		iwm_nic_unlock(sc);
2507 	}
2508 
2509 	return 0;
2510 }
2511 
2512 static int
2513 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2514 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2515 {
2516 	int i, ret = 0;
2517 	uint32_t last_read_idx = 0;
2518 
2519 	if (cpu == 1) {
2520 		*first_ucode_section = 0;
2521 	} else {
2522 		(*first_ucode_section)++;
2523 	}
2524 
2525 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2526 		last_read_idx = i;
2527 
2528 		/*
2529 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2530 		 * CPU1 sections from the CPU2 sections.
2531 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2532 		 * non-paged CPU2 sections from the CPU2 paging sections.
2533 		 */
2534 		if (!image->sec[i].data ||
2535 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2536 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2537 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2538 				    "Break since data is not valid or section is empty, sec = %d\n",
2539 				     i);
2540 			break;
2541 		}
2542 
2543 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2544 		if (ret)
2545 			return ret;
2546 	}
2547 
2548 	*first_ucode_section = last_read_idx;
2549 
2550 	return 0;
2552 }
2553 
2554 static int
2555 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2556 {
2557 	int ret = 0;
2558 	int first_ucode_section;
2559 
2560 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2561 		     image->is_dual_cpus ? "Dual" : "Single");
2562 
2563 	/* load to FW the binary non secured sections of CPU1 */
2564 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2565 	if (ret)
2566 		return ret;
2567 
2568 	if (image->is_dual_cpus) {
2569 		/* set CPU2 header address */
2570 		if (iwm_nic_lock(sc)) {
2571 			iwm_write_prph(sc,
2572 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2573 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2574 			iwm_nic_unlock(sc);
2575 		}
2576 
2577 		/* load to FW the binary sections of CPU2 */
2578 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2579 						 &first_ucode_section);
2580 		if (ret)
2581 			return ret;
2582 	}
2583 
2584 	iwm_enable_interrupts(sc);
2585 
2586 	/* release CPU reset */
2587 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2588 
2589 	return 0;
2590 }
2591 
2592 int
2593 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2594 	const struct iwm_fw_img *image)
2595 {
2596 	int ret = 0;
2597 	int first_ucode_section;
2598 
2599 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2600 		    image->is_dual_cpus ? "Dual" : "Single");
2601 
2602 	/* configure the ucode to be ready to get the secured image */
2603 	/* release CPU reset */
2604 	if (iwm_nic_lock(sc)) {
2605 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2606 		    IWM_RELEASE_CPU_RESET_BIT);
2607 		iwm_nic_unlock(sc);
2608 	}
2609 
2610 	/* load to FW the binary Secured sections of CPU1 */
2611 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2612 	    &first_ucode_section);
2613 	if (ret)
2614 		return ret;
2615 
2616 	/* load to FW the binary sections of CPU2 */
2617 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2618 	    &first_ucode_section);
2619 }
2620 
2621 /* XXX Get rid of this definition */
2622 static inline void
2623 iwm_enable_fw_load_int(struct iwm_softc *sc)
2624 {
2625 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2626 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2627 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2628 }
2629 
2630 /* XXX Add proper rfkill support code */
2631 static int
2632 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2633 {
2634 	int ret;
2635 
2636 	/* This may fail if AMT took ownership of the device */
2637 	if (iwm_prepare_card_hw(sc)) {
2638 		device_printf(sc->sc_dev,
2639 		    "%s: Exit HW not ready\n", __func__);
2640 		ret = EIO;
2641 		goto out;
2642 	}
2643 
2644 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2645 
2646 	iwm_disable_interrupts(sc);
2647 
2648 	/* make sure rfkill handshake bits are cleared */
2649 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2650 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2651 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2652 
2653 	/* clear (again), then enable host interrupts */
2654 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2655 
2656 	ret = iwm_nic_init(sc);
2657 	if (ret) {
2658 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2659 		goto out;
2660 	}
2661 
2662 	/*
2663 	 * Now, we load the firmware and don't want to be interrupted, even
2664 	 * by the RF-Kill interrupt (hence we mask all interrupts besides the
2665 	 * FH_TX interrupt which is needed to load the firmware). If the
2666 	 * RF-Kill switch is toggled, we will find out after having loaded
2667 	 * the firmware and return the proper value to the caller.
2668 	 */
2669 	iwm_enable_fw_load_int(sc);
2670 
2671 	/* really make sure rfkill handshake bits are cleared */
2672 	/* maybe we should write a few times more?  just to make sure */
2673 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2674 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2675 
2676 	/* Load the given image to the HW */
2677 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2678 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2679 	else
2680 		ret = iwm_pcie_load_given_ucode(sc, fw);
2681 
2682 	/* XXX re-check RF-Kill state */
2683 
2684 out:
2685 	return ret;
2686 }
2687 
2688 static int
2689 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2690 {
2691 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2692 		.valid = htole32(valid_tx_ant),
2693 	};
2694 
2695 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2696 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2697 }
2698 
2699 /* iwlwifi: mvm/fw.c */
2700 static int
2701 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2702 {
2703 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2704 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2705 
2706 	/* Set parameters */
2707 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2708 	phy_cfg_cmd.calib_control.event_trigger =
2709 	    sc->sc_default_calib[ucode_type].event_trigger;
2710 	phy_cfg_cmd.calib_control.flow_trigger =
2711 	    sc->sc_default_calib[ucode_type].flow_trigger;
2712 
2713 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2714 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2715 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2716 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2717 }
2718 
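/*
 * Notification-wait callback for the ALIVE response: record the error
 * and log event table pointers and the scheduler base address that
 * the firmware advertises, and note whether the ucode came up with
 * IWM_ALIVE_STATUS_OK.
 */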
2719 static int
2720 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2721 {
2722 	struct iwm_alive_data *alive_data = data;
2723 	struct iwm_alive_resp_v3 *palive3;
2724 	struct iwm_alive_resp *palive;
2725 	struct iwm_umac_alive *umac;
2726 	struct iwm_lmac_alive *lmac1;
2727 	struct iwm_lmac_alive *lmac2 = NULL;
2728 	uint16_t status;
2729 
2730 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2731 		palive = (void *)pkt->data;
2732 		umac = &palive->umac_data;
2733 		lmac1 = &palive->lmac_data[0];
2734 		lmac2 = &palive->lmac_data[1];
2735 		status = le16toh(palive->status);
2736 	} else {
2737 		palive3 = (void *)pkt->data;
2738 		umac = &palive3->umac_data;
2739 		lmac1 = &palive3->lmac_data;
2740 		status = le16toh(palive3->status);
2741 	}
2742 
2743 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2744 	if (lmac2)
2745 		sc->error_event_table[1] =
2746 			le32toh(lmac2->error_event_table_ptr);
2747 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2748 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2749 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2750 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2751 	if (sc->umac_error_event_table)
2752 		sc->support_umac_log = TRUE;
2753 
2754 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2755 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2756 		    status, lmac1->ver_type, lmac1->ver_subtype);
2757 
2758 	if (lmac2)
2759 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2760 
2761 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2762 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2763 		    le32toh(umac->umac_major),
2764 		    le32toh(umac->umac_minor));
2765 
2766 	return TRUE;
2767 }
2768 
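/*
 * Notification-wait callback for INIT calibration results: stash each
 * IWM_CALIB_RES_NOTIF_PHY_DB packet in the PHY database and keep
 * waiting; any other notification (normally IWM_INIT_COMPLETE_NOTIF)
 * ends the wait.
 */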
2769 static int
2770 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2771 	struct iwm_rx_packet *pkt, void *data)
2772 {
2773 	struct iwm_phy_db *phy_db = data;
2774 
2775 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2776 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2777 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2778 			    __func__, pkt->hdr.code);
2779 		}
2780 		return TRUE;
2781 	}
2782 
2783 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2784 		device_printf(sc->sc_dev,
2785 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2786 	}
2787 
2788 	return FALSE;
2789 }
2790 
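/*
 * Load the requested ucode image and block until the firmware's ALIVE
 * notification arrives or times out.  On success, finish the PCIe
 * post-ALIVE setup and, if the image uses paging, hand the paging
 * memory to the firmware.
 */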
2791 static int
2792 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2793 	enum iwm_ucode_type ucode_type)
2794 {
2795 	struct iwm_notification_wait alive_wait;
2796 	struct iwm_alive_data alive_data;
2797 	const struct iwm_fw_img *fw;
2798 	enum iwm_ucode_type old_type = sc->cur_ucode;
2799 	int error;
2800 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2801 
2802 	fw = &sc->sc_fw.img[ucode_type];
2803 	sc->cur_ucode = ucode_type;
2804 	sc->ucode_loaded = FALSE;
2805 
2806 	memset(&alive_data, 0, sizeof(alive_data));
2807 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2808 				   alive_cmd, nitems(alive_cmd),
2809 				   iwm_alive_fn, &alive_data);
2810 
2811 	error = iwm_start_fw(sc, fw);
2812 	if (error) {
2813 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2814 		sc->cur_ucode = old_type;
2815 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2816 		return error;
2817 	}
2818 
2819 	/*
2820 	 * Some things may run in the background now, but we
2821 	 * just wait for the ALIVE notification here.
2822 	 */
2823 	IWM_UNLOCK(sc);
2824 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2825 				      IWM_UCODE_ALIVE_TIMEOUT);
2826 	IWM_LOCK(sc);
2827 	if (error) {
2828 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2829 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2830 			if (iwm_nic_lock(sc)) {
2831 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2832 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2833 				iwm_nic_unlock(sc);
2834 			}
2835 			device_printf(sc->sc_dev,
2836 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2837 			    a, b);
2838 		}
2839 		sc->cur_ucode = old_type;
2840 		return error;
2841 	}
2842 
2843 	if (!alive_data.valid) {
2844 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2845 		    __func__);
2846 		sc->cur_ucode = old_type;
2847 		return EIO;
2848 	}
2849 
2850 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2851 
2852 	/*
2853 	 * Configure and operate the firmware paging mechanism.
2854 	 * The driver configures the paging flow only once; the CPU2
2855 	 * paging image is included in the IWM_UCODE_INIT image.
2856 	 */
2857 	if (fw->paging_mem_size) {
2858 		error = iwm_save_fw_paging(sc, fw);
2859 		if (error) {
2860 			device_printf(sc->sc_dev,
2861 			    "%s: failed to save the FW paging image\n",
2862 			    __func__);
2863 			return error;
2864 		}
2865 
2866 		error = iwm_send_paging_cmd(sc, fw);
2867 		if (error) {
2868 			device_printf(sc->sc_dev,
2869 			    "%s: failed to send the paging cmd\n", __func__);
2870 			iwm_free_fw_paging(sc);
2871 			return error;
2872 		}
2873 	}
2874 
2875 	if (!error)
2876 		sc->ucode_loaded = TRUE;
2877 	return error;
2878 }
2879 
2880 /*
2881  * mvm misc bits
2882  */
2883 
2884 /*
2885  * follows iwlwifi/fw.c
2886  */
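/*
 * Run the INIT ucode.  With justnvm set, bring the firmware up only
 * far enough to read the NVM and the MAC address; otherwise also
 * send the TX antenna and PHY configuration commands and wait for
 * the calibration-complete notification.
 */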
2887 static int
2888 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2889 {
2890 	struct iwm_notification_wait calib_wait;
2891 	static const uint16_t init_complete[] = {
2892 		IWM_INIT_COMPLETE_NOTIF,
2893 		IWM_CALIB_RES_NOTIF_PHY_DB
2894 	};
2895 	int ret;
2896 
2897 	/* do not operate with rfkill switch turned on */
2898 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2899 		device_printf(sc->sc_dev,
2900 		    "radio is disabled by hardware switch\n");
2901 		return EPERM;
2902 	}
2903 
2904 	iwm_init_notification_wait(sc->sc_notif_wait,
2905 				   &calib_wait,
2906 				   init_complete,
2907 				   nitems(init_complete),
2908 				   iwm_wait_phy_db_entry,
2909 				   sc->sc_phy_db);
2910 
2911 	/* Will also start the device */
2912 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2913 	if (ret) {
2914 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2915 		    ret);
2916 		goto error;
2917 	}
2918 
2919 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2920 		ret = iwm_send_bt_init_conf(sc);
2921 		if (ret) {
2922 			device_printf(sc->sc_dev,
2923 			    "failed to send bt coex configuration: %d\n", ret);
2924 			goto error;
2925 		}
2926 	}
2927 
2928 	if (justnvm) {
2929 		/* Read nvm */
2930 		ret = iwm_nvm_init(sc);
2931 		if (ret) {
2932 			device_printf(sc->sc_dev, "failed to read nvm\n");
2933 			goto error;
2934 		}
2935 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
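		/* Success; take the 'error' path only to remove calib_wait. */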
2936 		goto error;
2937 	}
2938 
2939 	/* Send TX valid antennas before triggering calibrations */
2940 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
2941 	if (ret) {
2942 		device_printf(sc->sc_dev,
2943 		    "failed to send antennas before calibration: %d\n", ret);
2944 		goto error;
2945 	}
2946 
2947 	/*
2948 	 * Send the phy configuration command to the init uCode to start
2949 	 * the internal calibrations of the 16.0 uCode init image.
2950 	 */
2951 	ret = iwm_send_phy_cfg_cmd(sc);
2952 	if (ret) {
2953 		device_printf(sc->sc_dev,
2954 		    "%s: Failed to run INIT calibrations: %d\n",
2955 		    __func__, ret);
2956 		goto error;
2957 	}
2958 
2959 	/*
2960 	 * Nothing to do but wait for the init complete notification
2961 	 * from the firmware.
2962 	 */
2963 	IWM_UNLOCK(sc);
2964 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2965 	    IWM_UCODE_CALIB_TIMEOUT);
2966 	IWM_LOCK(sc);
2967 
2968 
2970 
2971 error:
2972 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2973 out:
2974 	return ret;
2975 }
2976 
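/*
 * Ask the firmware to enable PCIe LTR (Latency Tolerance Reporting)
 * if LTR support was detected earlier.
 */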
2977 static int
2978 iwm_config_ltr(struct iwm_softc *sc)
2979 {
2980 	struct iwm_ltr_config_cmd cmd = {
2981 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
2982 	};
2983 
2984 	if (!sc->sc_ltr_enabled)
2985 		return 0;
2986 
2987 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
2988 }
2989 
2990 /*
2991  * receive side
2992  */
2993 
2994 /* (re)stock rx ring, called at init-time and at runtime */
2995 static int
2996 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2997 {
2998 	struct iwm_rx_ring *ring = &sc->rxq;
2999 	struct iwm_rx_data *data = &ring->data[idx];
3000 	struct mbuf *m;
3001 	bus_dmamap_t dmamap;
3002 	bus_dma_segment_t seg;
3003 	int nsegs, error;
3004 
3005 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3006 	if (m == NULL)
3007 		return ENOBUFS;
3008 
3009 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3010 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3011 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3012 	if (error != 0) {
3013 		device_printf(sc->sc_dev,
3014 		    "%s: can't map mbuf, error %d\n", __func__, error);
3015 		m_freem(m);
3016 		return error;
3017 	}
3018 
3019 	if (data->m != NULL)
3020 		bus_dmamap_unload(ring->data_dmat, data->map);
3021 
3022 	/* Swap ring->spare_map with data->map */
3023 	dmamap = data->map;
3024 	data->map = ring->spare_map;
3025 	ring->spare_map = dmamap;
3026 
3027 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3028 	data->m = m;
3029 
3030 	/* Update RX descriptor. */
3031 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
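	/*
	 * Multi-queue RX hardware takes the full 64-bit DMA address;
	 * older devices take the address right-shifted by 8, hence the
	 * 256-byte alignment assertion above.
	 */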
3032 	if (sc->cfg->mqrx_supported)
3033 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3034 	else
3035 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3036 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3037 	    BUS_DMASYNC_PREWRITE);
3038 
3039 	return 0;
3040 }
3041 
3042 static void
3043 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3044 {
3045 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3046 
3047 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3048 
3049 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3050 }
3051 
3052 /*
3053  * Retrieve the average noise (in dBm) among receivers.
3054  */
3055 static int
3056 iwm_get_noise(struct iwm_softc *sc,
3057     const struct iwm_statistics_rx_non_phy *stats)
3058 {
3059 	int i, total, nbant, noise;
3060 
3061 	total = nbant = noise = 0;
3062 	for (i = 0; i < 3; i++) {
3063 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3064 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3065 		    __func__,
3066 		    i,
3067 		    noise);
3068 
3069 		if (noise) {
3070 			total += noise;
3071 			nbant++;
3072 		}
3073 	}
3074 
3075 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3076 	    __func__, nbant, total);
3077 #if 0
3078 	/* There should be at least one antenna but check anyway. */
3079 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3080 #else
3081 	/* For now, just hard-code it to -96 to be safe */
3082 	return (-96);
3083 #endif
3084 }
3085 
3086 static void
3087 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3088 {
3089 	struct iwm_notif_statistics *stats = (void *)&pkt->data;
3090 
3091 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3092 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3093 }
3094 
3095 /* iwlwifi: mvm/rx.c */
3096 /*
3097  * iwm_rx_get_signal_strength - use the new RX PHY INFO API.
3098  * Values are reported by the fw as positive; negate them to obtain
3099  * dBm.  Account for missing antennas by replacing 0 values with
3100  * -256 dBm: practically zero power and an infeasible 8-bit value.
3101  */
3102 static int
3103 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3104     struct iwm_rx_phy_info *phy_info)
3105 {
3106 	int energy_a, energy_b, energy_c, max_energy;
3107 	uint32_t val;
3108 
3109 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3110 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3111 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3112 	energy_a = energy_a ? -energy_a : -256;
3113 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3114 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3115 	energy_b = energy_b ? -energy_b : -256;
3116 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3117 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3118 	energy_c = energy_c ? -energy_c : -256;
3119 	max_energy = MAX(energy_a, energy_b);
3120 	max_energy = MAX(max_energy, energy_c);
3121 
3122 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3123 	    "energy In A %d B %d C %d , and max %d\n",
3124 	    energy_a, energy_b, energy_c, max_energy);
3125 
3126 	return max_energy;
3127 }
3128 
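/*
 * Multi-queue RX variant: energies are likewise reported as positive
 * numbers, with 0 standing in for a missing antenna.
 */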
3129 static int
3130 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3131     struct iwm_rx_mpdu_desc *desc)
3132 {
3133 	int energy_a, energy_b;
3134 
3135 	energy_a = desc->v1.energy_a;
3136 	energy_b = desc->v1.energy_b;
3137 	energy_a = energy_a ? -energy_a : -256;
3138 	energy_b = energy_b ? -energy_b : -256;
3139 	return MAX(energy_a, energy_b);
3140 }
3141 
3142 /*
3143  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3144  *
3145  * Handles the actual data of the Rx packet from the fw
3146  */
3147 static bool
3148 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3149     bool stolen)
3150 {
3151 	struct ieee80211com *ic = &sc->sc_ic;
3152 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3153 	struct ieee80211_rx_stats rxs;
3154 	struct iwm_rx_phy_info *phy_info;
3155 	struct iwm_rx_mpdu_res_start *rx_res;
3156 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3157 	uint32_t len;
3158 	uint32_t rx_pkt_status;
3159 	int rssi;
3160 
3161 	phy_info = &sc->sc_last_phy_info;
3162 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3163 	len = le16toh(rx_res->byte_count);
3164 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3165 
3166 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3167 		device_printf(sc->sc_dev,
3168 		    "dsp size out of range [0,20]: %d\n",
3169 		    phy_info->cfg_phy_cnt);
3170 		return false;
3171 	}
3172 
3173 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3174 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3175 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3176 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3177 		return false;
3178 	}
3179 
3180 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3181 
3182 	/* Map it to relative value */
3183 	rssi = rssi - sc->sc_noise;
3184 
3185 	/* replenish ring for the buffer we're going to feed to the sharks */
3186 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3187 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3188 		    __func__);
3189 		return false;
3190 	}
3191 
3192 	m->m_data = pkt->data + sizeof(*rx_res);
3193 	m->m_pkthdr.len = m->m_len = len;
3194 
3195 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3196 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3197 
3198 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3199 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3200 	    __func__,
3201 	    le16toh(phy_info->channel),
3202 	    le16toh(phy_info->phy_flags));
3203 
3204 	/*
3205 	 * Populate an RX state struct with the provided information.
3206 	 */
3207 	bzero(&rxs, sizeof(rxs));
3208 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3209 	rxs.r_flags |= IEEE80211_R_BAND;
3210 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3211 	rxs.c_ieee = le16toh(phy_info->channel);
3212 	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3213 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3214 		rxs.c_band = IEEE80211_CHAN_2GHZ;
3215 	} else {
3216 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3217 		rxs.c_band = IEEE80211_CHAN_5GHZ;
3218 	}
3219 
3220 	/* rssi is in 1/2db units */
3221 	rxs.c_rssi = rssi * 2;
3222 	rxs.c_nf = sc->sc_noise;
3223 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3224 		return false;
3225 
3226 	if (ieee80211_radiotap_active_vap(vap)) {
3227 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3228 
3229 		tap->wr_flags = 0;
3230 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3231 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3232 		tap->wr_chan_freq = htole16(rxs.c_freq);
3233 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3234 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3235 		tap->wr_dbm_antsignal = (int8_t)rssi;
3236 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3237 		tap->wr_tsft = phy_info->system_timestamp;
3238 		switch (phy_info->rate) {
3239 		/* CCK rates. */
3240 		case  10: tap->wr_rate =   2; break;
3241 		case  20: tap->wr_rate =   4; break;
3242 		case  55: tap->wr_rate =  11; break;
3243 		case 110: tap->wr_rate =  22; break;
3244 		/* OFDM rates. */
3245 		case 0xd: tap->wr_rate =  12; break;
3246 		case 0xf: tap->wr_rate =  18; break;
3247 		case 0x5: tap->wr_rate =  24; break;
3248 		case 0x7: tap->wr_rate =  36; break;
3249 		case 0x9: tap->wr_rate =  48; break;
3250 		case 0xb: tap->wr_rate =  72; break;
3251 		case 0x1: tap->wr_rate =  96; break;
3252 		case 0x3: tap->wr_rate = 108; break;
3253 		/* Unknown rate: should not happen. */
3254 		default:  tap->wr_rate =   0;
3255 		}
3256 	}
3257 
3258 	return true;
3259 }
3260 
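/*
 * Multi-queue RX counterpart of iwm_rx_rx_mpdu(): status, length,
 * channel, and rate all come from the iwm_rx_mpdu_desc that precedes
 * the frame.
 */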
3261 static bool
3262 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3263     bool stolen)
3264 {
3265 	struct ieee80211com *ic = &sc->sc_ic;
3266 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3267 	struct ieee80211_frame *wh;
3268 	struct ieee80211_rx_stats rxs;
3269 	struct iwm_rx_mpdu_desc *desc;
3270 	struct iwm_rx_packet *pkt;
3271 	int rssi;
3272 	uint32_t hdrlen, len, rate_n_flags;
3273 	uint16_t phy_info;
3274 	uint8_t channel;
3275 
3276 	pkt = mtodo(m, offset);
3277 	desc = (void *)pkt->data;
3278 
3279 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3280 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3281 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3282 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3283 		return false;
3284 	}
3285 
3286 	channel = desc->v1.channel;
3287 	len = le16toh(desc->mpdu_len);
3288 	phy_info = le16toh(desc->phy_info);
3289 	rate_n_flags = desc->v1.rate_n_flags;
3290 
3291 	wh = mtodo(m, sizeof(*desc));
3292 	m->m_data = pkt->data + sizeof(*desc);
3293 	m->m_pkthdr.len = m->m_len = len;
3295 
3296 	/* Account for padding following the frame header. */
3297 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
3298 		hdrlen = ieee80211_anyhdrsize(wh);
3299 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3300 		m->m_data = mtodo(m, 2);
3301 		wh = mtod(m, struct ieee80211_frame *);
3302 	}
3303 
3304 	/* Map it to relative value */
3305 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3306 	rssi = rssi - sc->sc_noise;
3307 
3308 	/* replenish ring for the buffer we're going to feed to the sharks */
3309 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3310 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3311 		    __func__);
3312 		return false;
3313 	}
3314 
3315 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3316 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3317 
3318 	/*
3319 	 * Populate an RX state struct with the provided information.
3320 	 */
3321 	bzero(&rxs, sizeof(rxs));
3322 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3323 	rxs.r_flags |= IEEE80211_R_BAND;
3324 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3325 	rxs.c_ieee = channel;
3326 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3327 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3328 	rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3329 
3330 	/* rssi is in 1/2db units */
3331 	rxs.c_rssi = rssi * 2;
3332 	rxs.c_nf = sc->sc_noise;
3333 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3334 		return false;
3335 
3336 	if (ieee80211_radiotap_active_vap(vap)) {
3337 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3338 
3339 		tap->wr_flags = 0;
3340 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3341 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3342 		tap->wr_chan_freq = htole16(rxs.c_freq);
3343 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3344 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3345 		tap->wr_dbm_antsignal = (int8_t)rssi;
3346 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3347 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3348 		switch (rate_n_flags & 0xff) {
3349 		/* CCK rates. */
3350 		case  10: tap->wr_rate =   2; break;
3351 		case  20: tap->wr_rate =   4; break;
3352 		case  55: tap->wr_rate =  11; break;
3353 		case 110: tap->wr_rate =  22; break;
3354 		/* OFDM rates. */
3355 		case 0xd: tap->wr_rate =  12; break;
3356 		case 0xf: tap->wr_rate =  18; break;
3357 		case 0x5: tap->wr_rate =  24; break;
3358 		case 0x7: tap->wr_rate =  36; break;
3359 		case 0x9: tap->wr_rate =  48; break;
3360 		case 0xb: tap->wr_rate =  72; break;
3361 		case 0x1: tap->wr_rate =  96; break;
3362 		case 0x3: tap->wr_rate = 108; break;
3363 		/* Unknown rate: should not happen. */
3364 		default:  tap->wr_rate =   0;
3365 		}
3366 	}
3367 
3368 	return true;
3369 }
3370 
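/*
 * Common RX entry point: run the format-specific MPDU handler, then
 * hand the frame to net80211 with the driver lock dropped and the
 * net epoch entered.
 */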
3371 static bool
3372 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3373     bool stolen)
3374 {
3375 	struct epoch_tracker et;
3376 	struct ieee80211com *ic;
3377 	struct ieee80211_frame *wh;
3378 	struct ieee80211_node *ni;
3379 	bool ret;
3380 
3381 	ic = &sc->sc_ic;
3382 
3383 	ret = sc->cfg->mqrx_supported ?
3384 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3385 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3386 	if (!ret) {
3387 		counter_u64_add(ic->ic_ierrors, 1);
3388 		return (ret);
3389 	}
3390 
3391 	wh = mtod(m, struct ieee80211_frame *);
3392 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3393 
3394 	IWM_UNLOCK(sc);
3395 
3396 	NET_EPOCH_ENTER(et);
3397 	if (ni != NULL) {
3398 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3399 		ieee80211_input_mimo(ni, m);
3400 		ieee80211_free_node(ni);
3401 	} else {
3402 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3403 		ieee80211_input_mimo_all(ic, m);
3404 	}
3405 	NET_EPOCH_EXIT(et);
3406 
3407 	IWM_LOCK(sc);
3408 
3409 	return true;
3410 }
3411 
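/*
 * Handle a single-frame TX response: feed the retry counts and final
 * status to net80211 rate control and, when rate control selects a
 * new rate, push an updated link-quality command to the firmware.
 */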
3412 static int
3413 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3414 	struct iwm_node *in)
3415 {
3416 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3417 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3418 	struct ieee80211_node *ni = &in->in_ni;
3419 	struct ieee80211vap *vap = ni->ni_vap;
3420 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3421 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3422 	boolean_t rate_matched;
3423 	uint8_t tx_resp_rate;
3424 
3425 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3426 
3427 	/* Update rate control statistics. */
3428 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3429 	    __func__,
3430 	    (int) le16toh(tx_resp->status.status),
3431 	    (int) le16toh(tx_resp->status.sequence),
3432 	    tx_resp->frame_count,
3433 	    tx_resp->bt_kill_count,
3434 	    tx_resp->failure_rts,
3435 	    tx_resp->failure_frame,
3436 	    le32toh(tx_resp->initial_rate),
3437 	    (int) le16toh(tx_resp->wireless_media_time));
3438 
3439 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3440 
3441 	/* For rate control, ignore frames sent at a different initial rate. */
3442 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3443 
3444 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3445 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3446 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3447 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3448 	}
3449 
3450 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3451 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3452 	txs->short_retries = tx_resp->failure_rts;
3453 	txs->long_retries = tx_resp->failure_frame;
3454 	if (status != IWM_TX_STATUS_SUCCESS &&
3455 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3456 		switch (status) {
3457 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3458 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3459 			break;
3460 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3461 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3462 			break;
3463 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3464 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3465 			break;
3466 		default:
3467 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3468 			break;
3469 		}
3470 	} else {
3471 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3472 	}
3473 
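	/*
	 * Report the completion to net80211's rate control and, if a
	 * new rate was chosen, reprogram the firmware's link quality
	 * table to match it.
	 */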
3474 	if (rate_matched) {
3475 		ieee80211_ratectl_tx_complete(ni, txs);
3476 
3477 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3478 		new_rate = vap->iv_bss->ni_txrate;
3479 		if (new_rate != 0 && new_rate != cur_rate) {
3480 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3481 			iwm_setrates(sc, in, rix);
3482 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3483 		}
3484 	}
3485 
3486 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3487 }
3488 
3489 static void
3490 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3491 {
3492 	struct iwm_cmd_header *cmd_hdr;
3493 	struct iwm_tx_ring *ring;
3494 	struct iwm_tx_data *txd;
3495 	struct iwm_node *in;
3496 	struct mbuf *m;
3497 	int idx, qid, qmsk, status;
3498 
3499 	cmd_hdr = &pkt->hdr;
3500 	idx = cmd_hdr->idx;
3501 	qid = cmd_hdr->qid;
3502 
3503 	ring = &sc->txq[qid];
3504 	txd = &ring->data[idx];
3505 	in = txd->in;
3506 	m = txd->m;
3507 
3508 	KASSERT(txd->done == 0, ("txd not done"));
3509 	KASSERT(txd->in != NULL, ("txd without node"));
3510 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3511 
3512 	sc->sc_tx_timer = 0;
3513 
3514 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3515 
3516 	/* Unmap and free mbuf. */
3517 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3518 	bus_dmamap_unload(ring->data_dmat, txd->map);
3519 
3520 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3521 	    "free txd %p, in %p\n", txd, txd->in);
3522 	txd->done = 1;
3523 	txd->m = NULL;
3524 	txd->in = NULL;
3525 
3526 	ieee80211_tx_complete(&in->in_ni, m, status);
3527 
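	/*
	 * If this queue drained below the low watermark, clear its
	 * "full" bit and restart transmission once no queue is full.
	 */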
3528 	qmsk = 1 << qid;
3529 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3530 		sc->qfullmsk &= ~qmsk;
3531 		if (sc->qfullmsk == 0)
3532 			iwm_start(sc);
3533 	}
3534 }
3535 
3536 /*
3537  * transmit side
3538  */
3539 
3540 /*
3541  * Process a "command done" firmware notification.  This is where we wakeup
3542  * processes waiting for a synchronous command completion.
3543  * from if_iwn
3544  */
3545 static void
3546 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3547 {
3548 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3549 	struct iwm_tx_data *data;
3550 
3551 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3552 		return;	/* Not a command ack. */
3553 	}
3554 
3555 	/* XXX wide commands? */
3556 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3557 	    "cmd notification type 0x%x qid %d idx %d\n",
3558 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3559 
3560 	data = &ring->data[pkt->hdr.idx];
3561 
3562 	/* If the command was mapped in an mbuf, free it. */
3563 	if (data->m != NULL) {
3564 		bus_dmamap_sync(ring->data_dmat, data->map,
3565 		    BUS_DMASYNC_POSTWRITE);
3566 		bus_dmamap_unload(ring->data_dmat, data->map);
3567 		m_freem(data->m);
3568 		data->m = NULL;
3569 	}
3570 	wakeup(&ring->desc[pkt->hdr.idx]);
3571 
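	/*
	 * Consistency check: the index of the command just acked plus
	 * the number of commands still queued should line up with the
	 * ring's write index.
	 */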
3572 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3573 		device_printf(sc->sc_dev,
3574 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3575 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3576 		/* XXX call iwm_force_nmi() */
3577 	}
3578 
3579 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3580 	ring->queued--;
3581 	if (ring->queued == 0)
3582 		iwm_pcie_clear_cmd_in_flight(sc);
3583 }
3584 
3585 #if 0
3586 /*
3587  * necessary only for block ack mode
3588  */
3589 void
3590 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3591 	uint16_t len)
3592 {
3593 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3594 	uint16_t w_val;
3595 
3596 	scd_bc_tbl = sc->sched_dma.vaddr;
3597 
3598 	len += 8; /* magic numbers came naturally from paris */
3599 	len = roundup(len, 4) / 4;
3600 
3601 	w_val = htole16(sta_id << 12 | len);
3602 
3603 	/* Update TX scheduler. */
3604 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3605 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3606 	    BUS_DMASYNC_PREWRITE);
3607 
3608 	/* The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the table end, apparently for reads during ring wraparound. */
3609 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3610 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3611 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3612 		    BUS_DMASYNC_PREWRITE);
3613 	}
3614 }
3615 #endif
3616 
3617 static int
3618 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3619 {
3620 	int i;
3621 
3622 	for (i = 0; i < nitems(iwm_rates); i++) {
3623 		if (iwm_rates[i].rate == rate)
3624 			return (i);
3625 	}
3626 	/* XXX error? */
3627 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3628 	    "%s: couldn't find an entry for rate=%d\n",
3629 	    __func__,
3630 	    rate);
3631 	return (0);
3632 }
3633 
3634 /*
3635  * Fill in the rate related information for a transmit command.
3636  */
3637 static const struct iwm_rate *
3638 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3639 	struct mbuf *m, struct iwm_tx_cmd *tx)
3640 {
3641 	struct ieee80211_node *ni = &in->in_ni;
3642 	struct ieee80211_frame *wh;
3643 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3644 	const struct iwm_rate *rinfo;
3645 	int type;
3646 	int ridx, rate_flags;
3647 
3648 	wh = mtod(m, struct ieee80211_frame *);
3649 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3650 
3651 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3652 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3653 
3654 	if (type == IEEE80211_FC0_TYPE_MGT ||
3655 	    type == IEEE80211_FC0_TYPE_CTL ||
3656 	    (m->m_flags & M_EAPOL) != 0) {
3657 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3658 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3659 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3660 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3661 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3662 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3663 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3664 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3665 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3666 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3667 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3668 	} else {
3669 		/* for data frames, use RS table */
3670 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3671 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3672 		if (ridx == -1)
3673 			ridx = 0;
3674 
3675 		/* This is the index into the programmed table */
3676 		tx->initial_rate_index = 0;
3677 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3678 	}
3679 
3680 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3681 	    "%s: frame type=%d txrate %d\n",
3682 	        __func__, type, iwm_rates[ridx].rate);
3683 
3684 	rinfo = &iwm_rates[ridx];
3685 
3686 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3687 	    __func__, ridx,
3688 	    rinfo->rate,
3689 	    !! (IWM_RIDX_IS_CCK(ridx))
3690 	    );
3691 
3692 	/* XXX TODO: hard-coded TX antenna? */
3693 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3694 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3695 	else
3696 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3697 	if (IWM_RIDX_IS_CCK(ridx))
3698 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3699 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3700 
3701 	return rinfo;
3702 }
3703 
3704 #define TB0_SIZE 16
3705 static int
3706 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3707 {
3708 	struct ieee80211com *ic = &sc->sc_ic;
3709 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3710 	struct iwm_node *in = IWM_NODE(ni);
3711 	struct iwm_tx_ring *ring;
3712 	struct iwm_tx_data *data;
3713 	struct iwm_tfd *desc;
3714 	struct iwm_device_cmd *cmd;
3715 	struct iwm_tx_cmd *tx;
3716 	struct ieee80211_frame *wh;
3717 	struct ieee80211_key *k = NULL;
3718 	struct mbuf *m1;
3719 	const struct iwm_rate *rinfo;
3720 	uint32_t flags;
3721 	u_int hdrlen;
3722 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3723 	int nsegs;
3724 	uint8_t tid, type;
3725 	int i, totlen, error, pad;
3726 
3727 	wh = mtod(m, struct ieee80211_frame *);
3728 	hdrlen = ieee80211_anyhdrsize(wh);
3729 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3730 	tid = 0;
3731 	ring = &sc->txq[ac];
3732 	desc = &ring->desc[ring->cur];
3733 	data = &ring->data[ring->cur];
3734 
3735 	/* Fill out iwm_tx_cmd to send to the firmware */
3736 	cmd = &ring->cmd[ring->cur];
3737 	cmd->hdr.code = IWM_TX_CMD;
3738 	cmd->hdr.flags = 0;
3739 	cmd->hdr.qid = ring->qid;
3740 	cmd->hdr.idx = ring->cur;
3741 
3742 	tx = (void *)cmd->data;
3743 	memset(tx, 0, sizeof(*tx));
3744 
3745 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3746 
3747 	/* Encrypt the frame if need be. */
3748 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3749 		/* Retrieve the key for TX and do software encryption. */
3750 		k = ieee80211_crypto_encap(ni, m);
3751 		if (k == NULL) {
3752 			m_freem(m);
3753 			return (ENOBUFS);
3754 		}
3755 		/* 802.11 header may have moved. */
3756 		wh = mtod(m, struct ieee80211_frame *);
3757 	}
3758 
3759 	if (ieee80211_radiotap_active_vap(vap)) {
3760 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3761 
3762 		tap->wt_flags = 0;
3763 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3764 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3765 		tap->wt_rate = rinfo->rate;
3766 		if (k != NULL)
3767 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3768 		ieee80211_radiotap_tx(vap, m);
3769 	}
3770 
3771 	flags = 0;
3772 	totlen = m->m_pkthdr.len;
3773 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3774 		flags |= IWM_TX_CMD_FLG_ACK;
3775 	}
3776 
3777 	if (type == IEEE80211_FC0_TYPE_DATA &&
3778 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3779 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3780 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3781 	}
3782 
3783 	tx->sta_id = IWM_STATION_ID;
3784 
3785 	if (type == IEEE80211_FC0_TYPE_MGT) {
3786 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3787 
3788 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3789 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3790 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3791 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3792 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3793 		} else {
3794 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3795 		}
3796 	} else {
3797 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3798 	}
3799 
3800 	if (hdrlen & 3) {
3801 		/* First segment length must be a multiple of 4. */
3802 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3803 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3804 		pad = 4 - (hdrlen & 3);
3805 	} else {
3806 		tx->offload_assist = 0;
3807 		pad = 0;
3808 	}
3809 
3810 	tx->len = htole16(totlen);
3811 	tx->tid_tspec = tid;
3812 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3813 
3814 	/* Set physical address of "scratch area". */
3815 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3816 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3817 
3818 	/* Copy 802.11 header in TX command. */
3819 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3820 
3821 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3822 
3823 	tx->sec_ctl = 0;
3824 	tx->tx_flags |= htole32(flags);
3825 
3826 	/* Trim the 802.11 header; it was copied into the TX command above. */
3827 	m_adj(m, hdrlen);
3828 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3829 	    segs, &nsegs, BUS_DMA_NOWAIT);
3830 	if (error != 0) {
3831 		if (error != EFBIG) {
3832 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3833 			    error);
3834 			m_freem(m);
3835 			return error;
3836 		}
3837 		/* Too many DMA segments, linearize mbuf. */
3838 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3839 		if (m1 == NULL) {
3840 			device_printf(sc->sc_dev,
3841 			    "%s: could not defrag mbuf\n", __func__);
3842 			m_freem(m);
3843 			return (ENOBUFS);
3844 		}
3845 		m = m1;
3846 
3847 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3848 		    segs, &nsegs, BUS_DMA_NOWAIT);
3849 		if (error != 0) {
3850 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3851 			    error);
3852 			m_freem(m);
3853 			return error;
3854 		}
3855 	}
3856 	data->m = m;
3857 	data->in = in;
3858 	data->done = 0;
3859 
3860 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3861 	    "sending txd %p, in %p\n", data, data->in);
3862 	KASSERT(data->in != NULL, ("node is NULL"));
3863 
3864 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3865 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3866 	    ring->qid, ring->cur, totlen, nsegs,
3867 	    le32toh(tx->tx_flags),
3868 	    le32toh(tx->rate_n_flags),
3869 	    tx->initial_rate_index
3870 	    );
3871 
3872 	/* Fill TX descriptor. */
3873 	memset(desc, 0, sizeof(*desc));
3874 	desc->num_tbs = 2 + nsegs;
3875 
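	/*
	 * The first two TBs point at the command ring entry: TB0 covers
	 * its first TB0_SIZE bytes, TB1 the rest of the TX command plus
	 * the (padded) 802.11 header copied in above.
	 */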
3876 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3877 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3878 	    (TB0_SIZE << 4));
3879 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3880 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3881 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3882 	    hdrlen + pad - TB0_SIZE) << 4));
3883 
3884 	/* Other DMA segments are for data payload. */
3885 	for (i = 0; i < nsegs; i++) {
3886 		seg = &segs[i];
3887 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3888 		desc->tbs[i + 2].hi_n_len =
3889 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
3890 		    (seg->ds_len << 4));
3891 	}
3892 
3893 	bus_dmamap_sync(ring->data_dmat, data->map,
3894 	    BUS_DMASYNC_PREWRITE);
3895 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3896 	    BUS_DMASYNC_PREWRITE);
3897 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3898 	    BUS_DMASYNC_PREWRITE);
3899 
3900 #if 0
3901 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3902 #endif
3903 
3904 	/* Kick TX ring. */
3905 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3906 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3907 
3908 	/* Mark TX ring as full if we reach a certain threshold. */
3909 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3910 		sc->qfullmsk |= 1 << ring->qid;
3911 	}
3912 
3913 	return 0;
3914 }
3915 
3916 static int
3917 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3918     const struct ieee80211_bpf_params *params)
3919 {
3920 	struct ieee80211com *ic = ni->ni_ic;
3921 	struct iwm_softc *sc = ic->ic_softc;
3922 	int error = 0;
3923 
3924 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3925 	    "->%s begin\n", __func__);
3926 
3927 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3928 		m_freem(m);
3929 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3930 		    "<-%s not RUNNING\n", __func__);
3931 		return (ENETDOWN);
3932 	}
3933 
3934 	IWM_LOCK(sc);
3935 	/* XXX fix this: bpf params are currently ignored. */
3936 	error = iwm_tx(sc, m, ni, 0);
3941 	if (sc->sc_tx_timer == 0)
3942 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3943 	sc->sc_tx_timer = 5;
3944 	IWM_UNLOCK(sc);
3945 
3946 	return (error);
3947 }
3948 
3949 /*
3950  * mvm/tx.c
3951  */
3952 
3953 /*
3954  * Note that there are transports that buffer frames before they reach
3955  * the firmware. This means that after flush_tx_path is called, the
3956  * queue might not be empty. The race-free way to handle this is to:
3957  * 1) set the station as draining
3958  * 2) flush the Tx path
3959  * 3) wait for the transport queues to be empty
3960  */
3961 int
3962 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3963 {
3964 	int ret;
3965 	struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
3966 		.queues_ctl = htole32(tfd_msk),
3967 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3968 	};
3969 
3970 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3971 	    sizeof(flush_cmd), &flush_cmd);
3972 	if (ret)
3973 		device_printf(sc->sc_dev,
3974 		    "Flushing tx queue failed: %d\n", ret);
3975 	return ret;
3976 }
3977 
3978 /*
3979  * BEGIN mvm/quota.c
3980  */
3981 
3982 static int
3983 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3984 {
3985 	struct iwm_time_quota_cmd_v1 cmd;
3986 	int i, idx, ret, num_active_macs, quota, quota_rem;
3987 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3988 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3989 	uint16_t id;
3990 
3991 	memset(&cmd, 0, sizeof(cmd));
3992 
3993 	/* currently, PHY ID == binding ID */
3994 	if (ivp) {
3995 		id = ivp->phy_ctxt->id;
3996 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3997 		colors[id] = ivp->phy_ctxt->color;
3998 
3999 		n_ifs[id] = 1;
4001 	}
4002 
4003 	/*
4004 	 * The FW's scheduling session consists of
4005 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4006 	 * equally between all the bindings that require quota.
4007 	 */
4008 	num_active_macs = 0;
4009 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4010 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4011 		num_active_macs += n_ifs[i];
4012 	}
4013 
4014 	quota = 0;
4015 	quota_rem = 0;
4016 	if (num_active_macs) {
4017 		quota = IWM_MAX_QUOTA / num_active_macs;
4018 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4019 	}
4020 
4021 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4022 		if (colors[i] < 0)
4023 			continue;
4024 
4025 		cmd.quotas[idx].id_and_color =
4026 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4027 
4028 		if (n_ifs[i] <= 0) {
4029 			cmd.quotas[idx].quota = htole32(0);
4030 			cmd.quotas[idx].max_duration = htole32(0);
4031 		} else {
4032 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4033 			cmd.quotas[idx].max_duration = htole32(0);
4034 		}
4035 		idx++;
4036 	}
4037 
4038 	/* Give the remainder of the session to the first binding */
4039 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4040 
4041 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4042 	    sizeof(cmd), &cmd);
4043 	if (ret)
4044 		device_printf(sc->sc_dev,
4045 		    "%s: Failed to send quota: %d\n", __func__, ret);
4046 	return ret;
4047 }
4048 
4049 /*
4050  * END mvm/quota.c
4051  */
4052 
4053 /*
4054  * ieee80211 routines
4055  */
4056 
4057 /*
4058  * Change to AUTH state in 80211 state machine.  Roughly matches what
4059  * Linux does in bss_info_changed().
4060  */
4061 static int
4062 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4063 {
4064 	struct ieee80211_node *ni;
4065 	struct iwm_node *in;
4066 	struct iwm_vap *iv = IWM_VAP(vap);
4067 	uint32_t duration;
4068 	int error;
4069 
4070 	/*
4071 	 * XXX I have a feeling that the vap node is being
4072 	 * freed from underneath us. Grr.
4073 	 */
4074 	ni = ieee80211_ref_node(vap->iv_bss);
4075 	in = IWM_NODE(ni);
4076 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4077 	    "%s: called; vap=%p, bss ni=%p\n",
4078 	    __func__,
4079 	    vap,
4080 	    ni);
4081 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4082 	    __func__, ether_sprintf(ni->ni_bssid));
4083 
4084 	in->in_assoc = 0;
4085 	iv->iv_auth = 1;
4086 
4087 	/*
4088 	 * Firmware bug - it'll crash if the beacon interval is less
4089 	 * than 16.  Since we can't simply decline to connect, refuse
4090 	 * the station state change instead; this causes net80211 to
4091 	 * abandon attempts to connect to this AP, and eventually
4092 	 * wpa_supplicant will blacklist the AP...
4093 	 */
4094 	if (ni->ni_intval < 16) {
4095 		device_printf(sc->sc_dev,
4096 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4097 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4098 		error = EINVAL;
4099 		goto out;
4100 	}
4101 
4102 	error = iwm_allow_mcast(vap, sc);
4103 	if (error) {
4104 		device_printf(sc->sc_dev,
4105 		    "%s: failed to set multicast\n", __func__);
4106 		goto out;
4107 	}
4108 
4109 	/*
4110 	 * This is where it deviates from what Linux does.
4111 	 *
4112 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4113 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4114 	 * and always does a mac_ctx_changed().
4115 	 *
4116 	 * The openbsd port doesn't attempt to do that - it resets things
4117 	 * at odd states and does the add here.
4118 	 *
4119 	 * So, until the state handling is fixed (ie, we never reset
4120 	 * the NIC except for a firmware failure, which should drag
4121 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4122 	 * contexts that are required), let's do a dirty hack here.
4123 	 */
4124 	if (iv->is_uploaded) {
4125 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4126 			device_printf(sc->sc_dev,
4127 			    "%s: failed to update MAC\n", __func__);
4128 			goto out;
4129 		}
4130 	} else {
4131 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4132 			device_printf(sc->sc_dev,
4133 			    "%s: failed to add MAC\n", __func__);
4134 			goto out;
4135 		}
4136 	}
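	/*
	 * sc_firmware_state tracks how much per-vap state the firmware
	 * holds: 1 = MAC context added, 2 = binding added, 3 = station
	 * added.  iwm_bring_down_firmware() unwinds these in reverse.
	 */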
4137 	sc->sc_firmware_state = 1;
4138 
4139 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4140 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4141 		device_printf(sc->sc_dev,
4142 		    "%s: failed update phy ctxt\n", __func__);
4143 		goto out;
4144 	}
4145 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4146 
4147 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4148 		device_printf(sc->sc_dev,
4149 		    "%s: binding update cmd\n", __func__);
4150 		goto out;
4151 	}
4152 	sc->sc_firmware_state = 2;
4153 	/*
4154 	 * Authentication becomes unreliable when powersaving is left enabled
4155 	 * here. Powersaving will be activated again when association has
4156 	 * finished or is aborted.
4157 	 */
4158 	iv->ps_disabled = TRUE;
4159 	error = iwm_power_update_mac(sc);
4160 	iv->ps_disabled = FALSE;
4161 	if (error != 0) {
4162 		device_printf(sc->sc_dev,
4163 		    "%s: failed to update power management\n",
4164 		    __func__);
4165 		goto out;
4166 	}
4167 	if ((error = iwm_add_sta(sc, in)) != 0) {
4168 		device_printf(sc->sc_dev,
4169 		    "%s: failed to add sta\n", __func__);
4170 		goto out;
4171 	}
4172 	sc->sc_firmware_state = 3;
4173 
4174 	/*
4175 	 * Prevent the FW from wandering off channel during association
4176 	 * by "protecting" the session with a time event.
4177 	 */
4178 	/* XXX duration is in units of TU, not MS */
4179 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4180 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4181 
4182 	error = 0;
4183 out:
4184 	if (error != 0)
4185 		iv->iv_auth = 0;
4186 	ieee80211_free_node(ni);
4187 	return (error);
4188 }
4189 
4190 static struct ieee80211_node *
4191 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4192 {
4193 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4194 	    M_NOWAIT | M_ZERO);
4195 }
4196 
4197 static uint8_t
4198 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4199 {
4200 	uint8_t plcp = rate_n_flags & 0xff;
4201 	int i;
4202 
4203 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4204 		if (iwm_rates[i].plcp == plcp)
4205 			return iwm_rates[i].rate;
4206 	}
4207 	return 0;
4208 }
4209 
4210 uint8_t
4211 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4212 {
4213 	int i;
4214 	uint8_t rval;
4215 
4216 	for (i = 0; i < rs->rs_nrates; i++) {
4217 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4218 		if (rval == iwm_rates[ridx].rate)
4219 			return rs->rs_rates[i];
4220 	}
4221 
4222 	return 0;
4223 }
4224 
4225 static int
4226 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4227 {
4228 	int i;
4229 
4230 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4231 		if (iwm_rates[i].rate == rate)
4232 			return i;
4233 	}
4234 
4235 	device_printf(sc->sc_dev,
4236 	    "%s: WARNING: device rate for %u not found!\n",
4237 	    __func__, rate);
4238 
4239 	return -1;
4240 }
4241 
4242 
4243 static void
4244 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4245 {
4246 	struct ieee80211_node *ni = &in->in_ni;
4247 	struct iwm_lq_cmd *lq = &in->in_lq;
4248 	struct ieee80211_rateset *rs = &ni->ni_rates;
4249 	int nrates = rs->rs_nrates;
4250 	int i, ridx, tab = 0;
4251 //	int txant = 0;
4252 
4253 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4254 
4255 	if (nrates > nitems(lq->rs_table)) {
4256 		device_printf(sc->sc_dev,
4257 		    "%s: node supports %d rates, driver handles "
4258 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4259 		return;
4260 	}
4261 	if (nrates == 0) {
4262 		device_printf(sc->sc_dev,
4263 		    "%s: node supports 0 rates, odd!\n", __func__);
4264 		return;
4265 	}
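	/*
	 * 'rix' is the highest rate index rate control wants to use;
	 * only rates up to and including it go into the LQ table.
	 */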
4266 	nrates = imin(rix + 1, nrates);
4267 
4268 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4269 	    "%s: nrates=%d\n", __func__, nrates);
4270 
4271 	/* then construct a lq_cmd based on those */
4272 	memset(lq, 0, sizeof(*lq));
4273 	lq->sta_id = IWM_STATION_ID;
4274 
4275 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4276 	if (ni->ni_flags & IEEE80211_NODE_HT)
4277 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4278 
4279 	/*
4280 	 * Are these used? (We don't do SISO or MIMO.)
4281 	 * They need to be non-zero, though, or we get an error.
4282 	 */
4283 	lq->single_stream_ant_msk = 1;
4284 	lq->dual_stream_ant_msk = 1;
4285 
4286 	/*
4287 	 * Build the actual rate selection table.
4288 	 * The lowest bits are the rates.  Additionally,
4289 	 * CCK needs bit 9 to be set.  The rest of the bits
4290 	 * we add to the table select the TX antenna.
4291 	 * Note that we add the rates highest rate first
4292 	 * (the opposite order of ni_rates).
4293 	 */
4294 	for (i = 0; i < nrates; i++) {
4295 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4296 		int nextant;
4297 
4298 		/* Map 802.11 rate to HW rate index. */
4299 		ridx = iwm_rate2ridx(sc, rate);
4300 		if (ridx == -1)
4301 			continue;
4302 
4303 #if 0
4304 		if (txant == 0)
4305 			txant = iwm_get_valid_tx_ant(sc);
4306 		nextant = 1<<(ffs(txant)-1);
4307 		txant &= ~nextant;
4308 #else
4309 		nextant = iwm_get_valid_tx_ant(sc);
4310 #endif
4311 		tab = iwm_rates[ridx].plcp;
4312 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4313 		if (IWM_RIDX_IS_CCK(ridx))
4314 			tab |= IWM_RATE_MCS_CCK_MSK;
4315 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4316 		    "station rate i=%d, rate=%d, hw=%x\n",
4317 		    i, iwm_rates[ridx].rate, tab);
4318 		lq->rs_table[i] = htole32(tab);
4319 	}
4320 	/* then fill the rest with the lowest possible rate */
4321 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4322 		KASSERT(tab != 0, ("invalid tab"));
4323 		lq->rs_table[i] = htole32(tab);
4324 	}
4325 }
4326 
4327 static void
4328 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4329 {
4330 	struct iwm_vap *ivp = IWM_VAP(vap);
4331 	int error;
4332 
4333 	/* Avoid triggering the Tx watchdog when transfers are dropped here. */
4334 	sc->sc_tx_timer = 0;
4335 
4336 	ivp->iv_auth = 0;
4337 	if (sc->sc_firmware_state == 3) {
4338 		iwm_xmit_queue_drain(sc);
4339 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4340 		error = iwm_rm_sta(sc, vap, TRUE);
4341 		if (error) {
4342 			device_printf(sc->sc_dev,
4343 			    "%s: Failed to remove station: %d\n",
4344 			    __func__, error);
4345 		}
4346 	}
4347 	if (sc->sc_firmware_state == 3) {
4348 		error = iwm_mac_ctxt_changed(sc, vap);
4349 		if (error) {
4350 			device_printf(sc->sc_dev,
4351 			    "%s: Failed to change mac context: %d\n",
4352 			    __func__, error);
4353 		}
4354 	}
4355 	if (sc->sc_firmware_state == 3) {
4356 		error = iwm_sf_update(sc, vap, FALSE);
4357 		if (error) {
4358 			device_printf(sc->sc_dev,
4359 			    "%s: Failed to update smart FIFO: %d\n",
4360 			    __func__, error);
4361 		}
4362 	}
4363 	if (sc->sc_firmware_state == 3) {
4364 		error = iwm_rm_sta_id(sc, vap);
4365 		if (error) {
4366 			device_printf(sc->sc_dev,
4367 			    "%s: Failed to remove station id: %d\n",
4368 			    __func__, error);
4369 		}
4370 	}
4371 	if (sc->sc_firmware_state == 3) {
4372 		error = iwm_update_quotas(sc, NULL);
4373 		if (error) {
4374 			device_printf(sc->sc_dev,
4375 			    "%s: Failed to update PHY quota: %d\n",
4376 			    __func__, error);
4377 		}
4378 	}
4379 	if (sc->sc_firmware_state == 3) {
4380 		/* XXX Might need to specify bssid correctly. */
4381 		error = iwm_mac_ctxt_changed(sc, vap);
4382 		if (error) {
4383 			device_printf(sc->sc_dev,
4384 			    "%s: Failed to change mac context: %d\n",
4385 			    __func__, error);
4386 		}
4387 	}
4388 	if (sc->sc_firmware_state == 3) {
4389 		sc->sc_firmware_state = 2;
4390 	}
4391 	if (sc->sc_firmware_state > 1) {
4392 		error = iwm_binding_remove_vif(sc, ivp);
4393 		if (error) {
4394 			device_printf(sc->sc_dev,
4395 			    "%s: Failed to remove channel ctx: %d\n",
4396 			    __func__, error);
4397 		}
4398 	}
4399 	if (sc->sc_firmware_state > 1) {
4400 		sc->sc_firmware_state = 1;
4401 	}
4402 	ivp->phy_ctxt = NULL;
4403 	if (sc->sc_firmware_state > 0) {
4404 		error = iwm_mac_ctxt_changed(sc, vap);
4405 		if (error) {
4406 			device_printf(sc->sc_dev,
4407 			    "%s: Failed to change mac context: %d\n",
4408 			    __func__, error);
4409 		}
4410 	}
4411 	if (sc->sc_firmware_state > 0) {
4412 		error = iwm_power_update_mac(sc);
4413 		if (error != 0) {
4414 			device_printf(sc->sc_dev,
4415 			    "%s: failed to update power management\n",
4416 			    __func__);
4417 		}
4418 	}
4419 	sc->sc_firmware_state = 0;
4420 }
4421 
4422 static int
4423 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4424 {
4425 	struct iwm_vap *ivp = IWM_VAP(vap);
4426 	struct ieee80211com *ic = vap->iv_ic;
4427 	struct iwm_softc *sc = ic->ic_softc;
4428 	struct iwm_node *in;
4429 	int error;
4430 
4431 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4432 	    "switching state %s -> %s arg=0x%x\n",
4433 	    ieee80211_state_name[vap->iv_state],
4434 	    ieee80211_state_name[nstate],
4435 	    arg);
4436 
4437 	IEEE80211_UNLOCK(ic);
4438 	IWM_LOCK(sc);
4439 
4440 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4441 	    (nstate == IEEE80211_S_AUTH ||
4442 	     nstate == IEEE80211_S_ASSOC ||
4443 	     nstate == IEEE80211_S_RUN)) {
4444 		/* Stop the scan LED blinking when authenticating. */
4445 		iwm_led_blink_stop(sc);
4446 	}
4447 
4448 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4449 		iwm_led_disable(sc);
4450 		/* disable beacon filtering if we're hopping out of RUN */
4451 		iwm_disable_beacon_filter(sc);
4452 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4453 			in->in_assoc = 0;
4454 	}
4455 
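	/*
	 * Tear down the session-protection time event when backing out
	 * of an association attempt or a running connection.
	 */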
4456 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4457 	     vap->iv_state == IEEE80211_S_ASSOC ||
4458 	     vap->iv_state == IEEE80211_S_RUN) &&
4459 	    (nstate == IEEE80211_S_INIT ||
4460 	     nstate == IEEE80211_S_SCAN ||
4461 	     nstate == IEEE80211_S_AUTH)) {
4462 		iwm_stop_session_protection(sc, ivp);
4463 	}
4464 
4465 	if ((vap->iv_state == IEEE80211_S_RUN ||
4466 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4467 	    nstate == IEEE80211_S_INIT) {
4468 		/*
4469 		 * In this case, iv_newstate() wants to send an 80211 frame on
4470 		 * the network that we are leaving. So we need to call it,
4471 		 * before tearing down all the firmware state.
4472 		 */
4473 		IWM_UNLOCK(sc);
4474 		IEEE80211_LOCK(ic);
4475 		ivp->iv_newstate(vap, nstate, arg);
4476 		IEEE80211_UNLOCK(ic);
4477 		IWM_LOCK(sc);
4478 		iwm_bring_down_firmware(sc, vap);
4479 		IWM_UNLOCK(sc);
4480 		IEEE80211_LOCK(ic);
4481 		return 0;
4482 	}
4483 
4484 	switch (nstate) {
4485 	case IEEE80211_S_INIT:
4486 	case IEEE80211_S_SCAN:
4487 		break;
4488 
4489 	case IEEE80211_S_AUTH:
4490 		iwm_bring_down_firmware(sc, vap);
4491 		if ((error = iwm_auth(vap, sc)) != 0) {
4492 			device_printf(sc->sc_dev,
4493 			    "%s: could not move to auth state: %d\n",
4494 			    __func__, error);
4495 			iwm_bring_down_firmware(sc, vap);
4496 			IWM_UNLOCK(sc);
4497 			IEEE80211_LOCK(ic);
4498 			return 1;
4499 		}
4500 		break;
4501 
4502 	case IEEE80211_S_ASSOC:
4503 		/*
4504 		 * EBS may be disabled due to previous failures reported by FW.
4505 		 * Reset EBS status here, assuming the environment has changed.
4506 		 */
4507 		sc->last_ebs_successful = TRUE;
4508 		break;
4509 
4510 	case IEEE80211_S_RUN:
4511 		in = IWM_NODE(vap->iv_bss);
4512 		/* Update the association state, now that we have it all. */
4513 		/* (e.g., the associd comes in at this point) */
4514 		error = iwm_update_sta(sc, in);
4515 		if (error != 0) {
4516 			device_printf(sc->sc_dev,
4517 			    "%s: failed to update STA\n", __func__);
4518 			IWM_UNLOCK(sc);
4519 			IEEE80211_LOCK(ic);
4520 			return error;
4521 		}
4522 		in->in_assoc = 1;
4523 		error = iwm_mac_ctxt_changed(sc, vap);
4524 		if (error != 0) {
4525 			device_printf(sc->sc_dev,
4526 			    "%s: failed to update MAC: %d\n", __func__, error);
4527 		}
4528 
4529 		iwm_sf_update(sc, vap, FALSE);
4530 		iwm_enable_beacon_filter(sc, ivp);
4531 		iwm_power_update_mac(sc);
4532 		iwm_update_quotas(sc, ivp);
4533 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4534 		iwm_setrates(sc, in, rix);
4535 
4536 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4537 			device_printf(sc->sc_dev,
4538 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4539 		}
4540 
4541 		iwm_led_enable(sc);
4542 		break;
4543 
4544 	default:
4545 		break;
4546 	}
4547 	IWM_UNLOCK(sc);
4548 	IEEE80211_LOCK(ic);
4549 
4550 	return (ivp->iv_newstate(vap, nstate, arg));
4551 }
4552 
4553 void
4554 iwm_endscan_cb(void *arg, int pending)
4555 {
4556 	struct iwm_softc *sc = arg;
4557 	struct ieee80211com *ic = &sc->sc_ic;
4558 
4559 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4560 	    "%s: scan ended\n",
4561 	    __func__);
4562 
4563 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4564 }
4565 
4566 static int
4567 iwm_send_bt_init_conf(struct iwm_softc *sc)
4568 {
4569 	struct iwm_bt_coex_cmd bt_cmd;
4570 
4571 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4572 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4573 
4574 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4575 	    &bt_cmd);
4576 }
4577 
4578 static boolean_t
4579 iwm_is_lar_supported(struct iwm_softc *sc)
4580 {
4581 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4582 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4583 
4584 	if (iwm_lar_disable)
4585 		return FALSE;
4586 
4587 	/*
4588 	 * Enable LAR only if it is supported by the FW (TLV) &&
4589 	 * enabled in the NVM
4590 	 */
4591 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4592 		return nvm_lar && tlv_lar;
4593 	else
4594 		return tlv_lar;
4595 }
4596 
4597 static boolean_t
4598 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4599 {
4600 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4601 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4602 }
4603 
4604 static int
4605 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4606 {
4607 	struct iwm_mcc_update_cmd mcc_cmd;
4608 	struct iwm_host_cmd hcmd = {
4609 		.id = IWM_MCC_UPDATE_CMD,
4610 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4611 		.data = { &mcc_cmd },
4612 	};
4613 	int ret;
4614 #ifdef IWM_DEBUG
4615 	struct iwm_rx_packet *pkt;
4616 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4617 	struct iwm_mcc_update_resp_v2 *mcc_resp;
4618 	int n_channels;
4619 	uint16_t mcc;
4620 #endif
4621 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4622 
4623 	if (!iwm_is_lar_supported(sc)) {
4624 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4625 		    __func__);
4626 		return 0;
4627 	}
4628 
4629 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4630 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4631 	if (iwm_is_wifi_mcc_supported(sc))
4632 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4633 	else
4634 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4635 
4636 	if (resp_v2)
4637 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4638 	else
4639 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4640 
4641 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4642 	    "send MCC update to FW with '%c%c' src = %d\n",
4643 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4644 
4645 	ret = iwm_send_cmd(sc, &hcmd);
4646 	if (ret)
4647 		return ret;
4648 
4649 #ifdef IWM_DEBUG
4650 	pkt = hcmd.resp_pkt;
4651 
4652 	/* Extract MCC response */
4653 	if (resp_v2) {
4654 		mcc_resp = (void *)pkt->data;
4655 		mcc = mcc_resp->mcc;
4656 		n_channels =  le32toh(mcc_resp->n_channels);
4657 	} else {
4658 		mcc_resp_v1 = (void *)pkt->data;
4659 		mcc = mcc_resp_v1->mcc;
4660 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4661 	}
4662 
4663 	/* Workaround for a FW/NVM issue - it returns 0x00 for the world domain. */
4664 	if (mcc == 0)
4665 		mcc = 0x3030;  /* "00" - world */
4666 
4667 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4668 	    "regulatory domain '%c%c' (%d channels available)\n",
4669 	    mcc >> 8, mcc & 0xff, n_channels);
4670 #endif
4671 	iwm_free_resp(sc, &hcmd);
4672 
4673 	return 0;
4674 }
4675 
4676 static void
4677 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4678 {
4679 	struct iwm_host_cmd cmd = {
4680 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4681 		.len = { sizeof(uint32_t), },
4682 		.data = { &backoff, },
4683 	};
4684 
4685 	if (iwm_send_cmd(sc, &cmd) != 0) {
4686 		device_printf(sc->sc_dev,
4687 		    "failed to change thermal tx backoff\n");
4688 	}
4689 }
4690 
4691 static int
4692 iwm_init_hw(struct iwm_softc *sc)
4693 {
4694 	struct ieee80211com *ic = &sc->sc_ic;
4695 	int error, i, ac;
4696 
4697 	sc->sf_state = IWM_SF_UNINIT;
4698 
4699 	if ((error = iwm_start_hw(sc)) != 0) {
4700 		printf("iwm_start_hw: failed %d\n", error);
4701 		return error;
4702 	}
4703 
4704 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4705 		printf("iwm_run_init_ucode: failed %d\n", error);
4706 		return error;
4707 	}
4708 
4709 	/*
4710 	 * Stop and restart the HW since the INIT
4711 	 * ucode image was just loaded.
4712 	 */
4713 	iwm_stop_device(sc);
4714 	sc->sc_ps_disabled = FALSE;
4715 	if ((error = iwm_start_hw(sc)) != 0) {
4716 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4717 		return error;
4718 	}
4719 
4720 	/* Restart, this time with the regular firmware. */
4721 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4722 	if (error) {
4723 		device_printf(sc->sc_dev, "could not load firmware\n");
4724 		goto error;
4725 	}
4726 
4727 	error = iwm_sf_update(sc, NULL, FALSE);
4728 	if (error)
4729 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4730 
4731 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4732 		device_printf(sc->sc_dev, "bt init conf failed\n");
4733 		goto error;
4734 	}
4735 
4736 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4737 	if (error != 0) {
4738 		device_printf(sc->sc_dev, "antenna config failed\n");
4739 		goto error;
4740 	}
4741 
4742 	/* Send phy db control command and then phy db calibration */
4743 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4744 		goto error;
4745 
4746 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4747 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4748 		goto error;
4749 	}
4750 
4751 	/* Add auxiliary station for scanning */
4752 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4753 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4754 		goto error;
4755 	}
4756 
4757 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4758 		/*
4759 		 * The channel used here isn't relevant as it's
4760 		 * going to be overwritten in the other flows.
4761 		 * For now use the first channel we have.
4762 		 */
4763 		if ((error = iwm_phy_ctxt_add(sc,
4764 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4765 			goto error;
4766 	}
4767 
4768 	/* Initialize tx backoffs to the minimum. */
4769 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4770 		iwm_tt_tx_backoff(sc, 0);
4771 
4772 	if (iwm_config_ltr(sc) != 0)
4773 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4774 
4775 	error = iwm_power_update_device(sc);
4776 	if (error)
4777 		goto error;
4778 
4779 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4780 		goto error;
4781 
4782 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4783 		if ((error = iwm_config_umac_scan(sc)) != 0)
4784 			goto error;
4785 	}
4786 
4787 	/* Enable Tx queues. */
4788 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4789 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4790 		    iwm_ac_to_tx_fifo[ac]);
4791 		if (error)
4792 			goto error;
4793 	}
4794 
4795 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4796 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4797 		goto error;
4798 	}
4799 
4800 	return 0;
4801 
4802  error:
4803 	iwm_stop_device(sc);
4804 	return error;
4805 }
4806 
4807 /* Allow multicast from our BSSID. */
4808 static int
4809 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4810 {
4811 	struct ieee80211_node *ni = vap->iv_bss;
4812 	struct iwm_mcast_filter_cmd *cmd;
4813 	size_t size;
4814 	int error;
4815 
4816 	size = roundup(sizeof(*cmd), 4);
4817 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4818 	if (cmd == NULL)
4819 		return ENOMEM;
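	/*
	 * With pass_all set and an empty address list this should not
	 * filter anything; it mainly registers our BSSID with the
	 * firmware.
	 */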
4820 	cmd->filter_own = 1;
4821 	cmd->port_id = 0;
4822 	cmd->count = 0;
4823 	cmd->pass_all = 1;
4824 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4825 
4826 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4827 	    IWM_CMD_SYNC, size, cmd);
4828 	free(cmd, M_DEVBUF);
4829 
4830 	return (error);
4831 }
4832 
4833 /*
4834  * ifnet interfaces
4835  */
4836 
4837 static void
4838 iwm_init(struct iwm_softc *sc)
4839 {
4840 	int error;
4841 
4842 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4843 		return;
4844 	}
4845 	sc->sc_generation++;
4846 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4847 
4848 	if ((error = iwm_init_hw(sc)) != 0) {
4849 		printf("iwm_init_hw failed %d\n", error);
4850 		iwm_stop(sc);
4851 		return;
4852 	}
4853 
4854 	/*
4855 	 * Ok, firmware loaded and we are jogging
4856 	 */
4857 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4858 }
4859 
4860 static int
4861 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4862 {
4863 	struct iwm_softc *sc;
4864 	int error;
4865 
4866 	sc = ic->ic_softc;
4867 
4868 	IWM_LOCK(sc);
4869 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4870 		IWM_UNLOCK(sc);
4871 		return (ENXIO);
4872 	}
4873 	error = mbufq_enqueue(&sc->sc_snd, m);
4874 	if (error) {
4875 		IWM_UNLOCK(sc);
4876 		return (error);
4877 	}
4878 	iwm_start(sc);
4879 	IWM_UNLOCK(sc);
4880 	return (0);
4881 }
4882 
4883 /*
4884  * Dequeue packets from sendq and call send.
4885  */
4886 static void
4887 iwm_start(struct iwm_softc *sc)
4888 {
4889 	struct ieee80211_node *ni;
4890 	struct mbuf *m;
4891 	int ac = 0;
4892 
4893 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4894 	while (sc->qfullmsk == 0 &&
4895 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4896 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4897 		if (iwm_tx(sc, m, ni, ac) != 0) {
4898 			if_inc_counter(ni->ni_vap->iv_ifp,
4899 			    IFCOUNTER_OERRORS, 1);
4900 			ieee80211_free_node(ni);
4901 			continue;
4902 		}
4903 		if (sc->sc_tx_timer == 0) {
4904 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4905 			    sc);
4906 		}
4907 		sc->sc_tx_timer = 15;
4908 	}
4909 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4910 }
4911 
4912 static void
4913 iwm_stop(struct iwm_softc *sc)
4914 {
4915 
4916 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4917 	sc->sc_flags |= IWM_FLAG_STOPPED;
4918 	sc->sc_generation++;
4919 	iwm_led_blink_stop(sc);
4920 	sc->sc_tx_timer = 0;
4921 	iwm_stop_device(sc);
4922 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4923 }
4924 
4925 static void
4926 iwm_watchdog(void *arg)
4927 {
4928 	struct iwm_softc *sc = arg;
4929 	struct ieee80211com *ic = &sc->sc_ic;
4930 
4931 	if (sc->sc_attached == 0)
4932 		return;
4933 
4934 	if (sc->sc_tx_timer > 0) {
4935 		if (--sc->sc_tx_timer == 0) {
4936 			device_printf(sc->sc_dev, "device timeout\n");
4937 #ifdef IWM_DEBUG
4938 			iwm_nic_error(sc);
4939 #endif
4940 			ieee80211_restart_all(ic);
4941 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4942 			return;
4943 		}
4944 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4945 	}
4946 }
4947 
4948 static void
4949 iwm_parent(struct ieee80211com *ic)
4950 {
4951 	struct iwm_softc *sc = ic->ic_softc;
4952 	int startall = 0;
4953 	int rfkill = 0;
4954 
4955 	IWM_LOCK(sc);
4956 	if (ic->ic_nrunning > 0) {
4957 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4958 			iwm_init(sc);
4959 			rfkill = iwm_check_rfkill(sc);
4960 			if (!rfkill)
4961 				startall = 1;
4962 		}
4963 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4964 		iwm_stop(sc);
4965 	IWM_UNLOCK(sc);
4966 	if (startall)
4967 		ieee80211_start_all(ic);
4968 	else if (rfkill)
4969 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
4970 }
4971 
4972 static void
4973 iwm_rftoggle_task(void *arg, int npending __unused)
4974 {
4975 	struct iwm_softc *sc = arg;
4976 	struct ieee80211com *ic = &sc->sc_ic;
4977 	int rfkill;
4978 
4979 	IWM_LOCK(sc);
4980 	rfkill = iwm_check_rfkill(sc);
4981 	IWM_UNLOCK(sc);
4982 	if (rfkill) {
4983 		device_printf(sc->sc_dev,
4984 		    "%s: rfkill switch, disabling interface\n", __func__);
4985 		ieee80211_suspend_all(ic);
4986 		ieee80211_notify_radio(ic, 0);
4987 	} else {
4988 		device_printf(sc->sc_dev,
4989 		    "%s: rfkill cleared, re-enabling interface\n", __func__);
4990 		ieee80211_resume_all(ic);
4991 		ieee80211_notify_radio(ic, 1);
4992 	}
4993 }
4994 
4995 /*
4996  * The interrupt side of things
4997  */
4998 
4999 /*
5000  * error dumping routines are from iwlwifi/mvm/utils.c
5001  */
5002 
5003 /*
5004  * Note: This structure is read from the device with IO accesses,
5005  * and the reading already does the endian conversion. As it is
5006  * read with uint32_t-sized accesses, any members with a different size
5007  * need to be ordered correctly though!
5008  */
5009 struct iwm_error_event_table {
5010 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5011 	uint32_t error_id;		/* type of error */
5012 	uint32_t trm_hw_status0;	/* TRM HW status */
5013 	uint32_t trm_hw_status1;	/* TRM HW status */
5014 	uint32_t blink2;		/* branch link */
5015 	uint32_t ilink1;		/* interrupt link */
5016 	uint32_t ilink2;		/* interrupt link */
5017 	uint32_t data1;		/* error-specific data */
5018 	uint32_t data2;		/* error-specific data */
5019 	uint32_t data3;		/* error-specific data */
5020 	uint32_t bcon_time;		/* beacon timer */
5021 	uint32_t tsf_low;		/* network timestamp function timer */
5022 	uint32_t tsf_hi;		/* network timestamp function timer */
5023 	uint32_t gp1;		/* GP1 timer register */
5024 	uint32_t gp2;		/* GP2 timer register */
5025 	uint32_t fw_rev_type;	/* firmware revision type */
5026 	uint32_t major;		/* uCode version major */
5027 	uint32_t minor;		/* uCode version minor */
5028 	uint32_t hw_ver;		/* HW Silicon version */
5029 	uint32_t brd_ver;		/* HW board version */
5030 	uint32_t log_pc;		/* log program counter */
5031 	uint32_t frame_ptr;		/* frame pointer */
5032 	uint32_t stack_ptr;		/* stack pointer */
5033 	uint32_t hcmd;		/* last host command header */
5034 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5035 				 * rxtx_flag */
5036 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5037 				 * host_flag */
5038 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5039 				 * enc_flag */
5040 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5041 				 * time_flag */
5042 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5043 				 * wico interrupt */
5044 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5045 	uint32_t wait_event;		/* wait event() caller address */
5046 	uint32_t l2p_control;	/* L2pControlField */
5047 	uint32_t l2p_duration;	/* L2pDurationField */
5048 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5049 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5050 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5051 				 * (LMPM_PMG_SEL) */
5052 	uint32_t u_timestamp;	/* date and time of the
5053 				 * compilation */
5054 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5055 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5056 
5057 /*
5058  * UMAC error struct - relevant starting from family 8000 chip.
5059  * Note: This structure is read from the device with IO accesses,
5060  * and the reading already does the endian conversion. As it is
5061  * read with u32-sized accesses, any members with a different size
5062  * need to be ordered correctly though!
5063  */
5064 struct iwm_umac_error_event_table {
5065 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5066 	uint32_t error_id;	/* type of error */
5067 	uint32_t blink1;	/* branch link */
5068 	uint32_t blink2;	/* branch link */
5069 	uint32_t ilink1;	/* interrupt link */
5070 	uint32_t ilink2;	/* interrupt link */
5071 	uint32_t data1;		/* error-specific data */
5072 	uint32_t data2;		/* error-specific data */
5073 	uint32_t data3;		/* error-specific data */
5074 	uint32_t umac_major;
5075 	uint32_t umac_minor;
5076 	uint32_t frame_pointer;	/* core register 27*/
5077 	uint32_t stack_pointer;	/* core register 28 */
5078 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5079 	uint32_t nic_isr_pref;	/* ISR status register */
5080 } __packed;
5081 
5082 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5083 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5084 
5085 #ifdef IWM_DEBUG
5086 struct {
5087 	const char *name;
5088 	uint8_t num;
5089 } advanced_lookup[] = {
5090 	{ "NMI_INTERRUPT_WDG", 0x34 },
5091 	{ "SYSASSERT", 0x35 },
5092 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5093 	{ "BAD_COMMAND", 0x38 },
5094 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5095 	{ "FATAL_ERROR", 0x3D },
5096 	{ "NMI_TRM_HW_ERR", 0x46 },
5097 	{ "NMI_INTERRUPT_TRM", 0x4C },
5098 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5099 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5100 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5101 	{ "NMI_INTERRUPT_HOST", 0x66 },
5102 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5103 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5104 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5105 	{ "ADVANCED_SYSASSERT", 0 },
5106 };
5107 
5108 static const char *
5109 iwm_desc_lookup(uint32_t num)
5110 {
5111 	int i;
5112 
5113 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5114 		if (advanced_lookup[i].num == num)
5115 			return advanced_lookup[i].name;
5116 
5117 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5118 	return advanced_lookup[i].name;
5119 }
5120 
5121 static void
5122 iwm_nic_umac_error(struct iwm_softc *sc)
5123 {
5124 	struct iwm_umac_error_event_table table;
5125 	uint32_t base;
5126 
5127 	base = sc->umac_error_event_table;
5128 
5129 	if (base < 0x800000) {
5130 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5131 		    base);
5132 		return;
5133 	}
5134 
5135 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5136 		device_printf(sc->sc_dev, "reading errlog failed\n");
5137 		return;
5138 	}
5139 
5140 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5141 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5142 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5143 		    sc->sc_flags, table.valid);
5144 	}
5145 
5146 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5147 		iwm_desc_lookup(table.error_id));
5148 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5149 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5150 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5151 	    table.ilink1);
5152 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5153 	    table.ilink2);
5154 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5155 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5156 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5157 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5158 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5159 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5160 	    table.frame_pointer);
5161 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5162 	    table.stack_pointer);
5163 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5164 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5165 	    table.nic_isr_pref);
5166 }
5167 
5168 /*
5169  * Support for dumping the error log seemed like a good idea ...
5170  * but it's mostly hex junk and the only sensible thing is the
5171  * hw/ucode revision (which we know anyway).  Since it's here,
5172  * I'll just leave it in, just in case e.g. the Intel guys want to
5173  * help us decipher some "ADVANCED_SYSASSERT" later.
5174  */
5175 static void
5176 iwm_nic_error(struct iwm_softc *sc)
5177 {
5178 	struct iwm_error_event_table table;
5179 	uint32_t base;
5180 
5181 	device_printf(sc->sc_dev, "dumping device error log\n");
5182 	base = sc->error_event_table[0];
5183 	if (base < 0x800000) {
5184 		device_printf(sc->sc_dev,
5185 		    "Invalid error log pointer 0x%08x\n", base);
5186 		return;
5187 	}
5188 
5189 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5190 		device_printf(sc->sc_dev, "reading errlog failed\n");
5191 		return;
5192 	}
5193 
5194 	if (!table.valid) {
5195 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5196 		return;
5197 	}
5198 
5199 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5200 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5201 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5202 		    sc->sc_flags, table.valid);
5203 	}
5204 
5205 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5206 	    iwm_desc_lookup(table.error_id));
5207 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5208 	    table.trm_hw_status0);
5209 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5210 	    table.trm_hw_status1);
5211 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5212 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5213 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5214 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5215 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5216 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5217 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5218 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5219 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5220 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5221 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5222 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5223 	    table.fw_rev_type);
5224 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5225 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5226 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5227 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5228 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5229 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5230 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5231 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5232 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5233 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5234 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5235 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5236 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5237 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5238 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5239 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5240 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5241 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5242 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5243 
5244 	if (sc->umac_error_event_table)
5245 		iwm_nic_umac_error(sc);
5246 }
5247 #endif
5248 
5249 static void
5250 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5251 {
5252 	struct ieee80211com *ic = &sc->sc_ic;
5253 	struct iwm_cmd_response *cresp;
5254 	struct mbuf *m1;
5255 	uint32_t offset = 0;
5256 	uint32_t maxoff = IWM_RBUF_SIZE;
5257 	uint32_t nextoff;
5258 	boolean_t stolen = FALSE;
5259 
5260 #define HAVEROOM(a)	\
5261     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5262 
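	/*
	 * A single RX buffer can contain several firmware responses
	 * concatenated back to back, each one padded out to
	 * IWM_FH_RSCSR_FRAME_ALIGN; walk them until there is no room
	 * left for another packet header.
	 */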
5263 	while (HAVEROOM(offset)) {
5264 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5265 		    offset);
5266 		int qid, idx, code, len;
5267 
5268 		qid = pkt->hdr.qid;
5269 		idx = pkt->hdr.idx;
5270 
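		/*
		 * IWM_WIDE_ID() combines the command group (taken from
		 * hdr.flags here) with the opcode into one "wide" id.
		 */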
5271 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5272 
5273 		/*
5274 		 * We occasionally get these from the firmware, for unknown
5275 		 * reasons.  They seem harmless, so just ignore them for now.
5276 		 */
5277 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5278 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5279 			break;
5280 		}
5281 
5282 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5283 		    "rx packet qid=%d idx=%d type=%x\n",
5284 		    qid & ~0x80, pkt->hdr.idx, code);
5285 
5286 		len = iwm_rx_packet_len(pkt);
5287 		len += sizeof(uint32_t); /* account for status word */
5288 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5289 
5290 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5291 
5292 		switch (code) {
5293 		case IWM_REPLY_RX_PHY_CMD:
5294 			iwm_rx_rx_phy_cmd(sc, pkt);
5295 			break;
5296 
5297 		case IWM_REPLY_RX_MPDU_CMD: {
5298 			/*
5299 			 * If this is the last frame in the RX buffer, we
5300 			 * can directly feed the mbuf to the sharks here.
5301 			 */
5302 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5303 			    struct iwm_rx_packet *, nextoff);
5304 			if (!HAVEROOM(nextoff) ||
5305 			    (nextpkt->hdr.code == 0 &&
5306 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5307 			     nextpkt->hdr.idx == 0) ||
5308 			    (nextpkt->len_n_flags ==
5309 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5310 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5311 					stolen = FALSE;
5312 					/* Make sure we abort the loop */
5313 					nextoff = maxoff;
5314 				}
5315 				break;
5316 			}
5317 
5318 			/*
5319 			 * Use m_copym instead of m_split, because that
5320 			 * makes it easier to keep a valid rx buffer in
5321 			 * the ring when iwm_rx_mpdu() fails.
5322 			 *
5323 			 * We need to start m_copym() at offset 0, to get the
5324 			 * M_PKTHDR flag preserved.
5325 			 */
5326 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5327 			if (m1) {
5328 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5329 					stolen = TRUE;
5330 				else
5331 					m_freem(m1);
5332 			}
5333 			break;
5334 		}
5335 
5336 		case IWM_TX_CMD:
5337 			iwm_rx_tx_cmd(sc, pkt);
5338 			break;
5339 
5340 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5341 			struct iwm_missed_beacons_notif *resp;
5342 			int missed;
5343 
5344 			/* XXX look at mac_id to determine interface ID */
5345 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5346 
5347 			resp = (void *)pkt->data;
5348 			missed = le32toh(resp->consec_missed_beacons);
5349 
5350 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5351 			    "%s: MISSED_BEACON: mac_id=%d, "
5352 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5353 			    "num_rx=%d\n",
5354 			    __func__,
5355 			    le32toh(resp->mac_id),
5356 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5357 			    le32toh(resp->consec_missed_beacons),
5358 			    le32toh(resp->num_expected_beacons),
5359 			    le32toh(resp->num_recvd_beacons));
5360 
5361 			/* Be paranoid */
5362 			if (vap == NULL)
5363 				break;
5364 
5365 			/* XXX no net80211 locking? */
5366 			if (vap->iv_state == IEEE80211_S_RUN &&
5367 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5368 				if (missed > vap->iv_bmissthreshold) {
5369 					/* XXX bad locking; turn into task */
5370 					IWM_UNLOCK(sc);
5371 					ieee80211_beacon_miss(ic);
5372 					IWM_LOCK(sc);
5373 				}
5374 			}
5375 
5376 			break;
5377 		}
5378 
5379 		case IWM_MFUART_LOAD_NOTIFICATION:
5380 			break;
5381 
5382 		case IWM_ALIVE:
5383 			break;
5384 
5385 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5386 			break;
5387 
5388 		case IWM_STATISTICS_NOTIFICATION:
5389 			iwm_handle_rx_statistics(sc, pkt);
5390 			break;
5391 
5392 		case IWM_NVM_ACCESS_CMD:
5393 		case IWM_MCC_UPDATE_CMD:
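			/*
			 * sc_wantresp encodes the (queue, index) slot of the
			 * synchronous command we are waiting on; e.g. queue 9,
			 * index 4 is stored as 0x00090004.
			 */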
5394 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5395 				memcpy(sc->sc_cmd_resp,
5396 				    pkt, sizeof(sc->sc_cmd_resp));
5397 			}
5398 			break;
5399 
5400 		case IWM_MCC_CHUB_UPDATE_CMD: {
5401 			struct iwm_mcc_chub_notif *notif;
5402 			notif = (void *)pkt->data;
5403 
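			/*
			 * The MCC is two ASCII characters packed into 16
			 * bits, high byte first: e.g. 0x5553 decodes to the
			 * country code "US".
			 */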
5404 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5405 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5406 			sc->sc_fw_mcc[2] = '\0';
5407 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5408 			    "fw source %d sent CC '%s'\n",
5409 			    notif->source_id, sc->sc_fw_mcc);
5410 			break;
5411 		}
5412 
5413 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5414 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5415 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5416 			struct iwm_dts_measurement_notif_v1 *notif;
5417 
5418 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5419 				device_printf(sc->sc_dev,
5420 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5421 				break;
5422 			}
5423 			notif = (void *)pkt->data;
5424 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5425 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5426 			    notif->temp);
5427 			break;
5428 		}
5429 
5430 		case IWM_PHY_CONFIGURATION_CMD:
5431 		case IWM_TX_ANT_CONFIGURATION_CMD:
5432 		case IWM_ADD_STA:
5433 		case IWM_MAC_CONTEXT_CMD:
5434 		case IWM_REPLY_SF_CFG_CMD:
5435 		case IWM_POWER_TABLE_CMD:
5436 		case IWM_LTR_CONFIG:
5437 		case IWM_PHY_CONTEXT_CMD:
5438 		case IWM_BINDING_CONTEXT_CMD:
5439 		case IWM_TIME_EVENT_CMD:
5440 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5441 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5442 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5443 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5444 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5445 		case IWM_REPLY_BEACON_FILTERING_CMD:
5446 		case IWM_MAC_PM_POWER_TABLE:
5447 		case IWM_TIME_QUOTA_CMD:
5448 		case IWM_REMOVE_STA:
5449 		case IWM_TXPATH_FLUSH:
5450 		case IWM_LQ_CMD:
5451 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5452 				 IWM_FW_PAGING_BLOCK_CMD):
5453 		case IWM_BT_CONFIG:
5454 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5455 			cresp = (void *)pkt->data;
5456 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5457 				memcpy(sc->sc_cmd_resp,
5458 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5459 			}
5460 			break;
5461 
5462 		/* ignore */
5463 		case IWM_PHY_DB_CMD:
5464 			break;
5465 
5466 		case IWM_INIT_COMPLETE_NOTIF:
5467 			break;
5468 
5469 		case IWM_SCAN_OFFLOAD_COMPLETE:
5470 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5471 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5472 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5473 				ieee80211_runtask(ic, &sc->sc_es_task);
5474 			}
5475 			break;
5476 
5477 		case IWM_SCAN_ITERATION_COMPLETE:
5478 			break;
5480 
5481 		case IWM_SCAN_COMPLETE_UMAC:
5482 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5483 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5484 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5485 				ieee80211_runtask(ic, &sc->sc_es_task);
5486 			}
5487 			break;
5488 
5489 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5490 			struct iwm_umac_scan_iter_complete_notif *notif;
5491 			notif = (void *)pkt->data;
5492 
5493 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5494 			    "complete, status=0x%x, %d channels scanned\n",
5495 			    notif->status, notif->scanned_channels);
5496 			break;
5497 		}
5498 
5499 		case IWM_REPLY_ERROR: {
5500 			struct iwm_error_resp *resp;
5501 			resp = (void *)pkt->data;
5502 
5503 			device_printf(sc->sc_dev,
5504 			    "firmware error 0x%x, cmd 0x%x\n",
5505 			    le32toh(resp->error_type),
5506 			    resp->cmd_id);
5507 			break;
5508 		}
5509 
5510 		case IWM_TIME_EVENT_NOTIFICATION:
5511 			iwm_rx_time_event_notif(sc, pkt);
5512 			break;
5513 
5514 		/*
5515 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5516 		 * messages. Just ignore them for now.
5517 		 */
5518 		case IWM_DEBUG_LOG_MSG:
5519 			break;
5520 
5521 		case IWM_MCAST_FILTER_CMD:
5522 			break;
5523 
5524 		case IWM_SCD_QUEUE_CFG: {
5525 			struct iwm_scd_txq_cfg_rsp *rsp;
5526 			rsp = (void *)pkt->data;
5527 
5528 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5529 			    "queue cfg token=0x%x sta_id=%d "
5530 			    "tid=%d scd_queue=%d\n",
5531 			    rsp->token, rsp->sta_id, rsp->tid,
5532 			    rsp->scd_queue);
5533 			break;
5534 		}
5535 
5536 		default:
5537 			device_printf(sc->sc_dev,
5538 			    "code %x, frame %d/%d %x unhandled\n",
5539 			    code, qid & ~0x80, idx, pkt->len_n_flags);
5540 			break;
5541 		}
5542 
5543 		/*
5544 		 * Why test bit 0x80?  The Linux driver:
5545 		 *
5546 		 * There is one exception:  uCode sets bit 15 when it
5547 		 * originates the response/notification, i.e. when the
5548 		 * response/notification is not a direct response to a
5549 		 * command sent by the driver.  For example, uCode issues
5550 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5551 		 * it is not a direct response to any driver command.
5552 		 *
5553 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5554 		 * uses a slightly different format for pkt->hdr, and "qid"
5555 		 * is actually the upper byte of a two-byte field.
5556 		 */
5557 		if (!(qid & (1 << 7)))
5558 			iwm_cmd_done(sc, pkt);
5559 
5560 		offset = nextoff;
5561 	}
5562 	if (stolen)
5563 		m_freem(m);
5564 #undef HAVEROOM
5565 }
5566 
5567 /*
5568  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5569  * Basic structure from if_iwn
5570  */
5571 static void
5572 iwm_notif_intr(struct iwm_softc *sc)
5573 {
5574 	int count;
5575 	uint32_t wreg;
5576 	uint16_t hw;
5577 
5578 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5579 	    BUS_DMASYNC_POSTREAD);
5580 
5581 	if (sc->cfg->mqrx_supported) {
5582 		count = IWM_RX_MQ_RING_COUNT;
5583 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5584 	} else {
5585 		count = IWM_RX_LEGACY_RING_COUNT;
5586 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5587 	}
5588 
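	/*
	 * closed_rb_num is the firmware's index of the most recently
	 * closed receive buffer; it tells us how far we may advance
	 * through the ring.
	 */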
5589 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5590 
5591 	/*
5592 	 * Process responses
5593 	 */
5594 	while (sc->rxq.cur != hw) {
5595 		struct iwm_rx_ring *ring = &sc->rxq;
5596 		struct iwm_rx_data *data = &ring->data[ring->cur];
5597 
5598 		bus_dmamap_sync(ring->data_dmat, data->map,
5599 		    BUS_DMASYNC_POSTREAD);
5600 
5601 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5602 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5603 		iwm_handle_rxb(sc, data->m);
5604 
5605 		ring->cur = (ring->cur + 1) % count;
5606 	}
5607 
5608 	/*
5609 	 * Tell the firmware that it can reuse the ring entries that
5610 	 * we have just processed.
5611 	 * The hardware appears to get upset unless we align the
5612 	 * write pointer down to a multiple of 8.
5613 	 */
5614 	hw = (hw == 0) ? count - 1 : hw - 1;
5615 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5616 }
5617 
5618 static void
5619 iwm_intr(void *arg)
5620 {
5621 	struct iwm_softc *sc = arg;
5622 	int handled = 0;
5623 	int r1, r2, rv = 0;
5624 	int isperiodic = 0;
5625 
5626 	IWM_LOCK(sc);
5627 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5628 
5629 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5630 		uint32_t *ict = sc->ict_dma.vaddr;
5631 		int tmp;
5632 
5633 		tmp = htole32(ict[sc->ict_cur]);
5634 		if (!tmp)
5635 			goto out_ena;
5636 
5637 		/*
5638 		 * OK, there was something; keep plowing until we have it all.
5639 		 */
5640 		r1 = r2 = 0;
5641 		while (tmp) {
5642 			r1 |= tmp;
5643 			ict[sc->ict_cur] = 0;
5644 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5645 			tmp = htole32(ict[sc->ict_cur]);
5646 		}
5647 
5648 		/* this is where the fun begins.  don't ask */
5649 		if (r1 == 0xffffffff)
5650 			r1 = 0;
5651 
5652 		/* i am not expected to understand this */
5653 		if (r1 & 0xc0000)
5654 			r1 |= 0x8000;
5655 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
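		/*
		 * (Best guess, following the Linux driver: the ICT entries
		 * pack the CSR_INT cause bits into two adjacent bytes, and
		 * the shifts above spread them back out into the usual
		 * register layout.)
		 */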
5656 	} else {
5657 		r1 = IWM_READ(sc, IWM_CSR_INT);
5658 		/* "hardware gone" (where, fishing?) */
5659 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5660 			goto out;
5661 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5662 	}
5663 	if (r1 == 0 && r2 == 0) {
5664 		goto out_ena;
5665 	}
5666 
5667 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5668 
5669 	/* Safely ignore these bits for debug checks below */
5670 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5671 
5672 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5673 		int i;
5674 		struct ieee80211com *ic = &sc->sc_ic;
5675 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5676 
5677 #ifdef IWM_DEBUG
5678 		iwm_nic_error(sc);
5679 #endif
5680 		/* Dump driver status (TX and RX rings) while we're here. */
5681 		device_printf(sc->sc_dev, "driver status:\n");
5682 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5683 			struct iwm_tx_ring *ring = &sc->txq[i];
5684 			device_printf(sc->sc_dev,
5685 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5686 			    "queued=%-3d\n",
5687 			    i, ring->qid, ring->cur, ring->queued);
5688 		}
5689 		device_printf(sc->sc_dev,
5690 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5691 		device_printf(sc->sc_dev,
5692 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5693 
5694 		/* Reset our firmware state tracking. */
5695 		sc->sc_firmware_state = 0;
5696 		/* Don't stop the device; just do a VAP restart */
5697 		IWM_UNLOCK(sc);
5698 
5699 		if (vap == NULL) {
5700 			printf("%s: null vap\n", __func__);
5701 			return;
5702 		}
5703 
5704 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5705 		    "restarting\n", __func__, vap->iv_state);
5706 
5707 		ieee80211_restart_all(ic);
5708 		return;
5709 	}
5710 
5711 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5712 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5713 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5714 		iwm_stop(sc);
5715 		rv = 1;
5716 		goto out;
5717 	}
5718 
5719 	/* firmware chunk loaded */
5720 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5721 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5722 		handled |= IWM_CSR_INT_BIT_FH_TX;
5723 		sc->sc_fw_chunk_done = 1;
5724 		wakeup(&sc->sc_fw);
5725 	}
5726 
5727 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5728 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5729 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5730 	}
5731 
5732 	/*
5733 	 * The Linux driver uses periodic interrupts to avoid races.
5734 	 * We cargo-cult like it's going out of fashion.
5735 	 */
5736 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5737 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5738 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5739 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5740 			IWM_WRITE_1(sc,
5741 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5742 		isperiodic = 1;
5743 	}
5744 
5745 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5746 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5747 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5748 
5749 		iwm_notif_intr(sc);
5750 
5751 		/* enable periodic interrupt, see above */
5752 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5753 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5754 			    IWM_CSR_INT_PERIODIC_ENA);
5755 	}
5756 
5757 	if (__predict_false(r1 & ~handled))
5758 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5759 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5760 	rv = 1;
5761 
5762  out_ena:
5763 	iwm_restore_interrupts(sc);
5764  out:
5765 	IWM_UNLOCK(sc);
5766 	return;
5767 }
5768 
5769 /*
5770  * Autoconf glue-sniffing
5771  */
5772 #define	PCI_VENDOR_INTEL		0x8086
5773 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5774 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5775 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5776 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5777 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5778 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5779 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5780 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5781 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5782 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5783 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5784 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5785 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5786 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5787 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
5788 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5789 
5790 static const struct iwm_devices {
5791 	uint16_t		device;
5792 	const struct iwm_cfg	*cfg;
5793 } iwm_devices[] = {
5794 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5795 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5796 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5797 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5798 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5799 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5800 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5801 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5802 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5803 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5804 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5805 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5806 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5807 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5808 	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5809 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5810 };
5811 
5812 static int
5813 iwm_probe(device_t dev)
5814 {
5815 	int i;
5816 
5817 	for (i = 0; i < nitems(iwm_devices); i++) {
5818 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5819 		    pci_get_device(dev) == iwm_devices[i].device) {
5820 			device_set_desc(dev, iwm_devices[i].cfg->name);
5821 			return (BUS_PROBE_DEFAULT);
5822 		}
5823 	}
5824 
5825 	return (ENXIO);
5826 }
5827 
5828 static int
5829 iwm_dev_check(device_t dev)
5830 {
5831 	struct iwm_softc *sc;
5832 	uint16_t devid;
5833 	int i;
5834 
5835 	sc = device_get_softc(dev);
5836 
5837 	devid = pci_get_device(dev);
5838 	for (i = 0; i < nitems(iwm_devices); i++) {
5839 		if (iwm_devices[i].device == devid) {
5840 			sc->cfg = iwm_devices[i].cfg;
5841 			return (0);
5842 		}
5843 	}
5844 	device_printf(dev, "unknown adapter type\n");
5845 	return ENXIO;
5846 }
5847 
5848 /* PCI registers */
5849 #define PCI_CFG_RETRY_TIMEOUT	0x041
5850 
5851 static int
5852 iwm_pci_attach(device_t dev)
5853 {
5854 	struct iwm_softc *sc;
5855 	int count, error, rid;
5856 	uint16_t reg;
5857 
5858 	sc = device_get_softc(dev);
5859 
5860 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5861 	 * PCI Tx retries from interfering with C3 CPU state. */
5862 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5863 
5864 	/* Enable bus-mastering and hardware bug workaround. */
5865 	pci_enable_busmaster(dev);
5866 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5867 	/* if !MSI */
5868 	if (reg & PCIM_STATUS_INTxSTATE) {
5869 		reg &= ~PCIM_STATUS_INTxSTATE;
5870 	}
5871 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5872 
5873 	rid = PCIR_BAR(0);
5874 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5875 	    RF_ACTIVE);
5876 	if (sc->sc_mem == NULL) {
5877 		device_printf(sc->sc_dev, "can't map mem space\n");
5878 		return (ENXIO);
5879 	}
5880 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5881 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5882 
5883 	/* Install interrupt handler. */
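	/* Prefer one MSI vector (rid 1); fall back to a shared INTx line (rid 0). */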
5884 	count = 1;
5885 	rid = 0;
5886 	if (pci_alloc_msi(dev, &count) == 0)
5887 		rid = 1;
5888 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5889 	    (rid != 0 ? 0 : RF_SHAREABLE));
5890 	if (sc->sc_irq == NULL) {
5891 		device_printf(dev, "can't map interrupt\n");
5892 		return (ENXIO);
5893 	}
5894 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5895 	    NULL, iwm_intr, sc, &sc->sc_ih);
5896 	if (error != 0 || sc->sc_ih == NULL) {
5897 		device_printf(dev, "can't establish interrupt\n");
5898 		return (ENXIO);
5899 	}
5900 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5901 
5902 	return (0);
5903 }
5904 
5905 static void
5906 iwm_pci_detach(device_t dev)
5907 {
5908 	struct iwm_softc *sc = device_get_softc(dev);
5909 
5910 	if (sc->sc_irq != NULL) {
5911 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5912 		bus_release_resource(dev, SYS_RES_IRQ,
5913 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5914 		pci_release_msi(dev);
5915 	}
5916 	if (sc->sc_mem != NULL)
5917 		bus_release_resource(dev, SYS_RES_MEMORY,
5918 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5919 }
5920 
5921 static int
5922 iwm_attach(device_t dev)
5923 {
5924 	struct iwm_softc *sc = device_get_softc(dev);
5925 	struct ieee80211com *ic = &sc->sc_ic;
5926 	int error;
5927 	int txq_i, i;
5928 
5929 	sc->sc_dev = dev;
5930 	sc->sc_attached = 1;
5931 	IWM_LOCK_INIT(sc);
5932 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5933 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5934 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5935 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5936 	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
5937 
5938 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5939 	    taskqueue_thread_enqueue, &sc->sc_tq);
5940 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5941 	if (error != 0) {
5942 		device_printf(dev, "can't start taskq thread, error %d\n",
5943 		    error);
5944 		goto fail;
5945 	}
5946 
5947 	error = iwm_dev_check(dev);
5948 	if (error != 0)
5949 		goto fail;
5950 
5951 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5952 	if (sc->sc_notif_wait == NULL) {
5953 		device_printf(dev, "failed to init notification wait struct\n");
5954 		goto fail;
5955 	}
5956 
5957 	sc->sf_state = IWM_SF_UNINIT;
5958 
5959 	/* Init phy db */
5960 	sc->sc_phy_db = iwm_phy_db_init(sc);
5961 	if (!sc->sc_phy_db) {
5962 		device_printf(dev, "Cannot init phy_db\n");
5963 		goto fail;
5964 	}
5965 
5966 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5967 	sc->last_ebs_successful = TRUE;
5968 
5969 	/* PCI attach */
5970 	error = iwm_pci_attach(dev);
5971 	if (error != 0)
5972 		goto fail;
5973 
5974 	sc->sc_wantresp = -1;
5975 
5976 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5977 	/*
5978 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5979 	 * changed, and now the revision step also includes bit 0-1 (no more
5980 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5981 	 * in the old format.
5982 	 */
5983 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
5984 		int ret;
5985 		uint32_t hw_step;
5986 
5987 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5988 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5989 
5990 		if (iwm_prepare_card_hw(sc) != 0) {
5991 			device_printf(dev, "could not initialize hardware\n");
5992 			goto fail;
5993 		}
5994 
5995 		/*
5996 		 * In order to recognize C step the driver should read the
5997 		 * chip version id located at the AUX bus MISC address.
5998 		 */
5999 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6000 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6001 		DELAY(2);
6002 
6003 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6004 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6005 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6006 				   25000);
6007 		if (!ret) {
6008 			device_printf(sc->sc_dev,
6009 			    "Failed to wake up the nic\n");
6010 			goto fail;
6011 		}
6012 
6013 		if (iwm_nic_lock(sc)) {
6014 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6015 			hw_step |= IWM_ENABLE_WFPM;
6016 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6017 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6018 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6019 			if (hw_step == 0x3)
6020 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6021 						(IWM_SILICON_C_STEP << 2);
6022 			iwm_nic_unlock(sc);
6023 		} else {
6024 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6025 			goto fail;
6026 		}
6027 	}
6028 
6029 	/* Special-case the 7265D, which shares PCI IDs with the 7265. */
6030 	if (sc->cfg == &iwm7265_cfg &&
6031 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6032 		sc->cfg = &iwm7265d_cfg;
6033 	}
6034 
6035 	/* Allocate DMA memory for firmware transfers. */
6036 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6037 		device_printf(dev, "could not allocate memory for firmware\n");
6038 		goto fail;
6039 	}
6040 
6041 	/* Allocate "Keep Warm" page. */
6042 	if ((error = iwm_alloc_kw(sc)) != 0) {
6043 		device_printf(dev, "could not allocate keep warm page\n");
6044 		goto fail;
6045 	}
6046 
6047 	/* We use ICT interrupts */
6048 	if ((error = iwm_alloc_ict(sc)) != 0) {
6049 		device_printf(dev, "could not allocate ICT table\n");
6050 		goto fail;
6051 	}
6052 
6053 	/* Allocate TX scheduler "rings". */
6054 	if ((error = iwm_alloc_sched(sc)) != 0) {
6055 		device_printf(dev, "could not allocate TX scheduler rings\n");
6056 		goto fail;
6057 	}
6058 
6059 	/* Allocate TX rings */
6060 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6061 		if ((error = iwm_alloc_tx_ring(sc,
6062 		    &sc->txq[txq_i], txq_i)) != 0) {
6063 			device_printf(dev,
6064 			    "could not allocate TX ring %d\n",
6065 			    txq_i);
6066 			goto fail;
6067 		}
6068 	}
6069 
6070 	/* Allocate RX ring. */
6071 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6072 		device_printf(dev, "could not allocate RX ring\n");
6073 		goto fail;
6074 	}
6075 
6076 	/* Clear pending interrupts. */
6077 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6078 
6079 	ic->ic_softc = sc;
6080 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6081 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6082 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6083 
6084 	/* Set device capabilities. */
6085 	ic->ic_caps =
6086 	    IEEE80211_C_STA |
6087 	    IEEE80211_C_WPA |		/* WPA/RSN */
6088 	    IEEE80211_C_WME |
6089 	    IEEE80211_C_PMGT |
6090 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6091 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6092 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6093 	    ;
6094 	/* Advertise full-offload scanning */
6095 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6096 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6097 		sc->sc_phyctxt[i].id = i;
6098 		sc->sc_phyctxt[i].color = 0;
6099 		sc->sc_phyctxt[i].ref = 0;
6100 		sc->sc_phyctxt[i].channel = NULL;
6101 	}
6102 
6103 	/* Default noise floor */
6104 	sc->sc_noise = -96;
6105 
6106 	/* Max RSSI */
6107 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6108 
6109 #ifdef IWM_DEBUG
6110 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6111 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6112 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6113 #endif
6114 
6115 	error = iwm_read_firmware(sc);
6116 	if (error) {
6117 		goto fail;
6118 	} else if (sc->sc_fw.fw_fp == NULL) {
6119 		/*
6120 		 * XXX Add a solution for properly deferring firmware load
6121 		 *     during bootup.
6122 		 */
6123 		goto fail;
6124 	} else {
6125 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6126 		sc->sc_preinit_hook.ich_arg = sc;
6127 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6128 			device_printf(dev,
6129 			    "config_intrhook_establish failed\n");
6130 			goto fail;
6131 		}
6132 	}
6133 
6134 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6135 	    "<-%s\n", __func__);
6136 
6137 	return 0;
6138 
6139 	/* Free allocated memory if something failed during attachment. */
6140 fail:
6141 	iwm_detach_local(sc, 0);
6142 
6143 	return ENXIO;
6144 }
6145 
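/*
 * Reject group/multicast addresses (low bit of the first octet set)
 * and the all-zero address.
 */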
6146 static int
6147 iwm_is_valid_ether_addr(uint8_t *addr)
6148 {
6149 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6150 
6151 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6152 		return (FALSE);
6153 
6154 	return (TRUE);
6155 }
6156 
6157 static int
6158 iwm_wme_update(struct ieee80211com *ic)
6159 {
6160 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6161 	struct iwm_softc *sc = ic->ic_softc;
6162 	struct chanAccParams chp;
6163 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6164 	struct iwm_vap *ivp = IWM_VAP(vap);
6165 	struct iwm_node *in;
6166 	struct wmeParams tmp[WME_NUM_AC];
6167 	int aci, error;
6168 
6169 	if (vap == NULL)
6170 		return (0);
6171 
6172 	ieee80211_wme_ic_getparams(ic, &chp);
6173 
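	/*
	 * Snapshot the WME parameters under the net80211 lock, then
	 * apply them under the driver lock, so we never hold both
	 * locks at once.
	 */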
6174 	IEEE80211_LOCK(ic);
6175 	for (aci = 0; aci < WME_NUM_AC; aci++)
6176 		tmp[aci] = chp.cap_wmeParams[aci];
6177 	IEEE80211_UNLOCK(ic);
6178 
6179 	IWM_LOCK(sc);
6180 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6181 		const struct wmeParams *ac = &tmp[aci];
6182 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6183 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6184 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6185 		ivp->queue_params[aci].edca_txop =
6186 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6187 	}
6188 	ivp->have_wme = TRUE;
6189 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6190 		in = IWM_NODE(vap->iv_bss);
6191 		if (in->in_assoc) {
6192 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6193 				device_printf(sc->sc_dev,
6194 				    "%s: failed to update MAC\n", __func__);
6195 			}
6196 		}
6197 	}
6198 	IWM_UNLOCK(sc);
6199 
6200 	return (0);
6201 #undef IWM_EXP2
6202 }
6203 
6204 static void
6205 iwm_preinit(void *arg)
6206 {
6207 	struct iwm_softc *sc = arg;
6208 	device_t dev = sc->sc_dev;
6209 	struct ieee80211com *ic = &sc->sc_ic;
6210 	int error;
6211 
6212 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6213 	    "->%s\n", __func__);
6214 
6215 	IWM_LOCK(sc);
6216 	if ((error = iwm_start_hw(sc)) != 0) {
6217 		device_printf(dev, "could not initialize hardware\n");
6218 		IWM_UNLOCK(sc);
6219 		goto fail;
6220 	}
6221 
6222 	error = iwm_run_init_ucode(sc, 1);
6223 	iwm_stop_device(sc);
6224 	if (error) {
6225 		IWM_UNLOCK(sc);
6226 		goto fail;
6227 	}
6228 	device_printf(dev,
6229 	    "hw rev 0x%x, fw ver %s, address %s\n",
6230 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6231 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6232 
6233 	/* not all hardware can do 5GHz band */
6234 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6235 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6236 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6237 	IWM_UNLOCK(sc);
6238 
6239 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6240 	    ic->ic_channels);
6241 
6242 	/*
6243 	 * At this point we've committed - if we fail to do setup,
6244 	 * we now also have to tear down the net80211 state.
6245 	 */
6246 	ieee80211_ifattach(ic);
6247 	ic->ic_vap_create = iwm_vap_create;
6248 	ic->ic_vap_delete = iwm_vap_delete;
6249 	ic->ic_raw_xmit = iwm_raw_xmit;
6250 	ic->ic_node_alloc = iwm_node_alloc;
6251 	ic->ic_scan_start = iwm_scan_start;
6252 	ic->ic_scan_end = iwm_scan_end;
6253 	ic->ic_update_mcast = iwm_update_mcast;
6254 	ic->ic_getradiocaps = iwm_init_channel_map;
6255 	ic->ic_set_channel = iwm_set_channel;
6256 	ic->ic_scan_curchan = iwm_scan_curchan;
6257 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6258 	ic->ic_wme.wme_update = iwm_wme_update;
6259 	ic->ic_parent = iwm_parent;
6260 	ic->ic_transmit = iwm_transmit;
6261 	iwm_radiotap_attach(sc);
6262 	if (bootverbose)
6263 		ieee80211_announce(ic);
6264 
6265 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6266 	    "<-%s\n", __func__);
6267 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6268 
6269 	return;
6270 fail:
6271 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6272 	iwm_detach_local(sc, 0);
6273 }
6274 
6275 /*
6276  * Attach the interface to 802.11 radiotap.
6277  */
6278 static void
6279 iwm_radiotap_attach(struct iwm_softc *sc)
6280 {
6281 	struct ieee80211com *ic = &sc->sc_ic;
6282 
6283 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6284 	    "->%s begin\n", __func__);
6285 	ieee80211_radiotap_attach(ic,
6286 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6287 	    IWM_TX_RADIOTAP_PRESENT,
6288 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6289 	    IWM_RX_RADIOTAP_PRESENT);
6290 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6291 	    "->%s end\n", __func__);
6292 }
6293 
6294 static struct ieee80211vap *
6295 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6296     enum ieee80211_opmode opmode, int flags,
6297     const uint8_t bssid[IEEE80211_ADDR_LEN],
6298     const uint8_t mac[IEEE80211_ADDR_LEN])
6299 {
6300 	struct iwm_vap *ivp;
6301 	struct ieee80211vap *vap;
6302 
6303 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6304 		return NULL;
6305 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6306 	vap = &ivp->iv_vap;
6307 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6308 	vap->iv_bmissthreshold = 10;            /* override default */
6309 	/* Override with driver methods. */
6310 	ivp->iv_newstate = vap->iv_newstate;
6311 	vap->iv_newstate = iwm_newstate;
6312 
6313 	ivp->id = IWM_DEFAULT_MACID;
6314 	ivp->color = IWM_DEFAULT_COLOR;
6315 
6316 	ivp->have_wme = FALSE;
6317 	ivp->ps_disabled = FALSE;
6318 
6319 	ieee80211_ratectl_init(vap);
6320 	/* Complete setup. */
6321 	ieee80211_vap_attach(vap, ieee80211_media_change,
6322 	    ieee80211_media_status, mac);
6323 	ic->ic_opmode = opmode;
6324 
6325 	return vap;
6326 }
6327 
6328 static void
6329 iwm_vap_delete(struct ieee80211vap *vap)
6330 {
6331 	struct iwm_vap *ivp = IWM_VAP(vap);
6332 
6333 	ieee80211_ratectl_deinit(vap);
6334 	ieee80211_vap_detach(vap);
6335 	free(ivp, M_80211_VAP);
6336 }
6337 
6338 static void
6339 iwm_xmit_queue_drain(struct iwm_softc *sc)
6340 {
6341 	struct mbuf *m;
6342 	struct ieee80211_node *ni;
6343 
6344 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6345 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6346 		ieee80211_free_node(ni);
6347 		m_freem(m);
6348 	}
6349 }
6350 
6351 static void
6352 iwm_scan_start(struct ieee80211com *ic)
6353 {
6354 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6355 	struct iwm_softc *sc = ic->ic_softc;
6356 	int error;
6357 
6358 	IWM_LOCK(sc);
6359 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6360 		/* This should not be possible */
6361 		device_printf(sc->sc_dev,
6362 		    "%s: Previous scan not completed yet\n", __func__);
6363 	}
6364 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6365 		error = iwm_umac_scan(sc);
6366 	else
6367 		error = iwm_lmac_scan(sc);
6368 	if (error != 0) {
6369 		device_printf(sc->sc_dev, "could not initiate scan\n");
6370 		IWM_UNLOCK(sc);
6371 		ieee80211_cancel_scan(vap);
6372 	} else {
6373 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6374 		iwm_led_blink_start(sc);
6375 		IWM_UNLOCK(sc);
6376 	}
6377 }
6378 
6379 static void
6380 iwm_scan_end(struct ieee80211com *ic)
6381 {
6382 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6383 	struct iwm_softc *sc = ic->ic_softc;
6384 
6385 	IWM_LOCK(sc);
6386 	iwm_led_blink_stop(sc);
6387 	if (vap->iv_state == IEEE80211_S_RUN)
6388 		iwm_led_enable(sc);
6389 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6390 		/*
6391 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6392 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6393 		 * taskqueue.
6394 		 */
6395 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6396 		iwm_scan_stop_wait(sc);
6397 	}
6398 	IWM_UNLOCK(sc);
6399 
6400 	/*
6401 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6402 	 * This is to make sure that it won't call ieee80211_scan_done
6403 	 * when we have already started the next scan.
6404 	 */
6405 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6406 }
6407 
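/*
 * The callbacks below are intentionally empty: scanning is fully
 * offloaded to the firmware (see IEEE80211_FEXT_SCAN_OFFLOAD above),
 * so there is nothing for the driver to do per channel change or per
 * dwell, and multicast filter updates are likewise not implemented.
 */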
6408 static void
6409 iwm_update_mcast(struct ieee80211com *ic)
6410 {
6411 }
6412 
6413 static void
6414 iwm_set_channel(struct ieee80211com *ic)
6415 {
6416 }
6417 
6418 static void
6419 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6420 {
6421 }
6422 
6423 static void
6424 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6425 {
6426 }
6427 
6428 void
6429 iwm_init_task(void *arg1)
6430 {
6431 	struct iwm_softc *sc = arg1;
6432 
6433 	IWM_LOCK(sc);
6434 	while (sc->sc_flags & IWM_FLAG_BUSY)
6435 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6436 	sc->sc_flags |= IWM_FLAG_BUSY;
6437 	iwm_stop(sc);
6438 	if (sc->sc_ic.ic_nrunning > 0)
6439 		iwm_init(sc);
6440 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6441 	wakeup(&sc->sc_flags);
6442 	IWM_UNLOCK(sc);
6443 }
6444 
6445 static int
6446 iwm_resume(device_t dev)
6447 {
6448 	struct iwm_softc *sc = device_get_softc(dev);
6449 	int do_reinit = 0;
6450 
6451 	/*
6452 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6453 	 * PCI Tx retries from interfering with C3 CPU state.
6454 	 */
6455 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6456 
6457 	if (!sc->sc_attached)
6458 		return 0;
6459 
6460 	iwm_init_task(sc);
6461 
6462 	IWM_LOCK(sc);
6463 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6464 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6465 		do_reinit = 1;
6466 	}
6467 	IWM_UNLOCK(sc);
6468 
6469 	if (do_reinit)
6470 		ieee80211_resume_all(&sc->sc_ic);
6471 
6472 	return 0;
6473 }
6474 
6475 static int
6476 iwm_suspend(device_t dev)
6477 {
6478 	int do_stop = 0;
6479 	struct iwm_softc *sc = device_get_softc(dev);
6480 
6481 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6482 
6483 	if (!sc->sc_attached)
6484 		return (0);
6485 
6486 	ieee80211_suspend_all(&sc->sc_ic);
6487 
6488 	if (do_stop) {
6489 		IWM_LOCK(sc);
6490 		iwm_stop(sc);
6491 		sc->sc_flags |= IWM_FLAG_SCANNING;
6492 		IWM_UNLOCK(sc);
6493 	}
6494 
6495 	return (0);
6496 }
6497 
6498 static int
6499 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6500 {
6501 	struct iwm_fw_info *fw = &sc->sc_fw;
6502 	device_t dev = sc->sc_dev;
6503 	int i;
6504 
6505 	if (!sc->sc_attached)
6506 		return 0;
6507 	sc->sc_attached = 0;
6508 	if (do_net80211) {
6509 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6510 	}
6511 	iwm_stop_device(sc);
6512 	taskqueue_drain_all(sc->sc_tq);
6513 	taskqueue_free(sc->sc_tq);
6514 	if (do_net80211) {
6515 		IWM_LOCK(sc);
6516 		iwm_xmit_queue_drain(sc);
6517 		IWM_UNLOCK(sc);
6518 		ieee80211_ifdetach(&sc->sc_ic);
6519 	}
6520 	callout_drain(&sc->sc_led_blink_to);
6521 	callout_drain(&sc->sc_watchdog_to);
6522 
6523 	iwm_phy_db_free(sc->sc_phy_db);
6524 	sc->sc_phy_db = NULL;
6525 
6526 	iwm_free_nvm_data(sc->nvm_data);
6527 
6528 	/* Free descriptor rings */
6529 	iwm_free_rx_ring(sc, &sc->rxq);
6530 	for (i = 0; i < nitems(sc->txq); i++)
6531 		iwm_free_tx_ring(sc, &sc->txq[i]);
6532 
6533 	/* Free firmware */
6534 	if (fw->fw_fp != NULL)
6535 		iwm_fw_info_free(fw);
6536 
6537 	/* Free scheduler */
6538 	iwm_dma_contig_free(&sc->sched_dma);
6539 	iwm_dma_contig_free(&sc->ict_dma);
6540 	iwm_dma_contig_free(&sc->kw_dma);
6541 	iwm_dma_contig_free(&sc->fw_dma);
6542 
6543 	iwm_free_fw_paging(sc);
6544 
6545 	/* Finished with the hardware - detach things */
6546 	iwm_pci_detach(dev);
6547 
6548 	if (sc->sc_notif_wait != NULL) {
6549 		iwm_notification_wait_free(sc->sc_notif_wait);
6550 		sc->sc_notif_wait = NULL;
6551 	}
6552 
6553 	IWM_LOCK_DESTROY(sc);
6554 
6555 	return (0);
6556 }
6557 
6558 static int
6559 iwm_detach(device_t dev)
6560 {
6561 	struct iwm_softc *sc = device_get_softc(dev);
6562 
6563 	return (iwm_detach_local(sc, 1));
6564 }
6565 
6566 static device_method_t iwm_pci_methods[] = {
6567         /* Device interface */
6568         DEVMETHOD(device_probe,         iwm_probe),
6569         DEVMETHOD(device_attach,        iwm_attach),
6570         DEVMETHOD(device_detach,        iwm_detach),
6571         DEVMETHOD(device_suspend,       iwm_suspend),
6572         DEVMETHOD(device_resume,        iwm_resume),
6573 
6574         DEVMETHOD_END
6575 };
6576 
6577 static driver_t iwm_pci_driver = {
6578         "iwm",
6579         iwm_pci_methods,
6580         sizeof (struct iwm_softc)
6581 };
6582 
6583 static devclass_t iwm_devclass;
6584 
6585 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6586 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6587     iwm_devices, nitems(iwm_devices));
6588 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6589 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6590 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6591