xref: /freebsd/sys/dev/iwm/if_iwm.c (revision b08fc26cbdd00df6852e71e1be58fa9cc92019f0)
1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126 
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130 
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133 
134 #include <net/bpf.h>
135 
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142 
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147 
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152 
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_config.h>
156 #include <dev/iwm/if_iwm_debug.h>
157 #include <dev/iwm/if_iwm_notif_wait.h>
158 #include <dev/iwm/if_iwm_util.h>
159 #include <dev/iwm/if_iwm_binding.h>
160 #include <dev/iwm/if_iwm_phy_db.h>
161 #include <dev/iwm/if_iwm_mac_ctxt.h>
162 #include <dev/iwm/if_iwm_phy_ctxt.h>
163 #include <dev/iwm/if_iwm_time_event.h>
164 #include <dev/iwm/if_iwm_power.h>
165 #include <dev/iwm/if_iwm_scan.h>
166 
167 #include <dev/iwm/if_iwm_pcie_trans.h>
168 #include <dev/iwm/if_iwm_led.h>
169 #include <dev/iwm/if_iwm_fw.h>
170 
/*
 * Channel numbers advertised by the NVM of pre-8000-family devices;
 * array order matches the NVM channel-list layout.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The parsing code indexes up to IWM_NUM_CHANNELS entries. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
181 
/*
 * Channel numbers advertised by the NVM of 8000-family devices;
 * a superset of the pre-8000 list with additional 5 GHz channels.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The parsing code indexes up to IWM_NUM_CHANNELS_8000 entries. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
192 
193 #define IWM_NUM_2GHZ_CHANNELS	14
194 #define IWM_N_HW_ADDR_MASK	0xF
195 
196 /*
197  * XXX For now, there's simply a fixed set of rate table entries
198  * that are populated.
199  */
200 const struct iwm_rate {
201 	uint8_t rate;
202 	uint8_t plcp;
203 } iwm_rates[] = {
204 	{   2,	IWM_RATE_1M_PLCP  },
205 	{   4,	IWM_RATE_2M_PLCP  },
206 	{  11,	IWM_RATE_5M_PLCP  },
207 	{  22,	IWM_RATE_11M_PLCP },
208 	{  12,	IWM_RATE_6M_PLCP  },
209 	{  18,	IWM_RATE_9M_PLCP  },
210 	{  24,	IWM_RATE_12M_PLCP },
211 	{  36,	IWM_RATE_18M_PLCP },
212 	{  48,	IWM_RATE_24M_PLCP },
213 	{  72,	IWM_RATE_36M_PLCP },
214 	{  96,	IWM_RATE_48M_PLCP },
215 	{ 108,	IWM_RATE_54M_PLCP },
216 };
217 #define IWM_RIDX_CCK	0
218 #define IWM_RIDX_OFDM	4
219 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
220 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
221 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
222 
/* One NVM section image as read back from the device. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
227 
228 #define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
229 #define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
230 
/* State filled in while waiting for the firmware "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once the notification was received */
	uint32_t scd_base_addr;	/* SCD (scheduler) base address */
};
235 
236 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
237 static int	iwm_firmware_store_section(struct iwm_softc *,
238                                            enum iwm_ucode_type,
239                                            const uint8_t *, size_t);
240 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
241 static void	iwm_fw_info_free(struct iwm_fw_info *);
242 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
243 static int	iwm_alloc_fwmem(struct iwm_softc *);
244 static int	iwm_alloc_sched(struct iwm_softc *);
245 static int	iwm_alloc_kw(struct iwm_softc *);
246 static int	iwm_alloc_ict(struct iwm_softc *);
247 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
248 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
249 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
250 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
251                                   int);
252 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
253 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
254 static void	iwm_enable_interrupts(struct iwm_softc *);
255 static void	iwm_restore_interrupts(struct iwm_softc *);
256 static void	iwm_disable_interrupts(struct iwm_softc *);
257 static void	iwm_ict_reset(struct iwm_softc *);
258 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
259 static void	iwm_stop_device(struct iwm_softc *);
260 static void	iwm_mvm_nic_config(struct iwm_softc *);
261 static int	iwm_nic_rx_init(struct iwm_softc *);
262 static int	iwm_nic_tx_init(struct iwm_softc *);
263 static int	iwm_nic_init(struct iwm_softc *);
264 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
265 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
266 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
267                                    uint16_t, uint8_t *, uint16_t *);
268 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
269 				     uint16_t *, uint32_t);
270 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
271 static void	iwm_add_channel_band(struct iwm_softc *,
272 		    struct ieee80211_channel[], int, int *, int, size_t,
273 		    const uint8_t[]);
274 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
275 		    struct ieee80211_channel[]);
276 static struct iwm_nvm_data *
277 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
278 			   const uint16_t *, const uint16_t *,
279 			   const uint16_t *, const uint16_t *,
280 			   const uint16_t *);
281 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
282 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
283 					       struct iwm_nvm_data *,
284 					       const uint16_t *,
285 					       const uint16_t *);
286 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
287 			    const uint16_t *);
288 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
289 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
290 				  const uint16_t *);
291 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
292 				   const uint16_t *);
293 static void	iwm_set_radio_cfg(const struct iwm_softc *,
294 				  struct iwm_nvm_data *, uint32_t);
295 static struct iwm_nvm_data *
296 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
297 static int	iwm_nvm_init(struct iwm_softc *);
298 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
299 				      const struct iwm_fw_desc *);
300 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
301 					     bus_addr_t, uint32_t);
302 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
303 						const struct iwm_fw_sects *,
304 						int, int *);
305 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
306 					   const struct iwm_fw_sects *,
307 					   int, int *);
308 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
309 					       const struct iwm_fw_sects *);
310 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
311 					  const struct iwm_fw_sects *);
312 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
313 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
314 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
315 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
316                                               enum iwm_ucode_type);
317 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
318 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
319 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
320 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
321 					    struct iwm_rx_phy_info *);
322 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
323                                       struct iwm_rx_packet *,
324                                       struct iwm_rx_data *);
325 static int	iwm_get_noise(struct iwm_softc *sc,
326 		    const struct iwm_mvm_statistics_rx_non_phy *);
327 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
328                                    struct iwm_rx_data *);
329 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
330                                          struct iwm_rx_packet *,
331 				         struct iwm_node *);
332 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
333                                   struct iwm_rx_data *);
334 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
335 #if 0
336 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
337                                  uint16_t);
338 #endif
339 static const struct iwm_rate *
340 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
341 			struct mbuf *, struct iwm_tx_cmd *);
342 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
343                        struct ieee80211_node *, int);
344 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
345 			     const struct ieee80211_bpf_params *);
346 static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
347 				      uint32_t tfd_msk, uint32_t flags);
348 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
349 					        struct iwm_mvm_add_sta_cmd_v7 *,
350                                                 int *);
351 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
352                                        int);
353 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
354 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
355 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
356                                            struct iwm_int_sta *,
357 				           const uint8_t *, uint16_t, uint16_t);
358 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
359 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
360 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
361 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
362 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
363 static struct ieee80211_node *
364 		iwm_node_alloc(struct ieee80211vap *,
365 		               const uint8_t[IEEE80211_ADDR_LEN]);
366 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
367 static int	iwm_media_change(struct ifnet *);
368 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
369 static void	iwm_endscan_cb(void *, int);
370 static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
371 					struct iwm_sf_cfg_cmd *,
372 					struct ieee80211_node *);
373 static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
374 static int	iwm_send_bt_init_conf(struct iwm_softc *);
375 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
376 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
377 static int	iwm_init_hw(struct iwm_softc *);
378 static void	iwm_init(struct iwm_softc *);
379 static void	iwm_start(struct iwm_softc *);
380 static void	iwm_stop(struct iwm_softc *);
381 static void	iwm_watchdog(void *);
382 static void	iwm_parent(struct ieee80211com *);
383 #ifdef IWM_DEBUG
384 static const char *
385 		iwm_desc_lookup(uint32_t);
386 static void	iwm_nic_error(struct iwm_softc *);
387 static void	iwm_nic_umac_error(struct iwm_softc *);
388 #endif
389 static void	iwm_notif_intr(struct iwm_softc *);
390 static void	iwm_intr(void *);
391 static int	iwm_attach(device_t);
392 static int	iwm_is_valid_ether_addr(uint8_t *);
393 static void	iwm_preinit(void *);
394 static int	iwm_detach_local(struct iwm_softc *sc, int);
395 static void	iwm_init_task(void *);
396 static void	iwm_radiotap_attach(struct iwm_softc *);
397 static struct ieee80211vap *
398 		iwm_vap_create(struct ieee80211com *,
399 		               const char [IFNAMSIZ], int,
400 		               enum ieee80211_opmode, int,
401 		               const uint8_t [IEEE80211_ADDR_LEN],
402 		               const uint8_t [IEEE80211_ADDR_LEN]);
403 static void	iwm_vap_delete(struct ieee80211vap *);
404 static void	iwm_scan_start(struct ieee80211com *);
405 static void	iwm_scan_end(struct ieee80211com *);
406 static void	iwm_update_mcast(struct ieee80211com *);
407 static void	iwm_set_channel(struct ieee80211com *);
408 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
409 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
410 static int	iwm_detach(device_t);
411 
412 /*
413  * Firmware parser.
414  */
415 
416 static int
417 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
418 {
419 	const struct iwm_fw_cscheme_list *l = (const void *)data;
420 
421 	if (dlen < sizeof(*l) ||
422 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
423 		return EINVAL;
424 
425 	/* we don't actually store anything for now, always use s/w crypto */
426 
427 	return 0;
428 }
429 
430 static int
431 iwm_firmware_store_section(struct iwm_softc *sc,
432     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
433 {
434 	struct iwm_fw_sects *fws;
435 	struct iwm_fw_desc *fwone;
436 
437 	if (type >= IWM_UCODE_TYPE_MAX)
438 		return EINVAL;
439 	if (dlen < sizeof(uint32_t))
440 		return EINVAL;
441 
442 	fws = &sc->sc_fw.fw_sects[type];
443 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
444 		return EINVAL;
445 
446 	fwone = &fws->fw_sect[fws->fw_count];
447 
448 	/* first 32bit are device load offset */
449 	memcpy(&fwone->offset, data, sizeof(uint32_t));
450 
451 	/* rest is data */
452 	fwone->data = data + sizeof(uint32_t);
453 	fwone->len = dlen - sizeof(uint32_t);
454 
455 	fws->fw_count++;
456 
457 	return 0;
458 }
459 
460 #define IWM_DEFAULT_SCAN_CHANNELS 40
461 
/* iwlwifi: iwl-drv.c */
/* On-disk payload of the IWM_UCODE_TLV_DEF_CALIB TLV (little-endian). */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* enum iwm_ucode_type, LE */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
467 
468 static int
469 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
470 {
471 	const struct iwm_tlv_calib_data *def_calib = data;
472 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
473 
474 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
475 		device_printf(sc->sc_dev,
476 		    "Wrong ucode_type %u for default "
477 		    "calibration.\n", ucode_type);
478 		return EINVAL;
479 	}
480 
481 	sc->sc_default_calib[ucode_type].flow_trigger =
482 	    def_calib->calib.flow_trigger;
483 	sc->sc_default_calib[ucode_type].event_trigger =
484 	    def_calib->calib.event_trigger;
485 
486 	return 0;
487 }
488 
/*
 * Release the firmware image reference and forget any parsed sections.
 * The caller retains ownership of fw->fw_status (parse serialization).
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
497 
498 static int
499 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
500 {
501 	struct iwm_fw_info *fw = &sc->sc_fw;
502 	const struct iwm_tlv_ucode_header *uhdr;
503 	struct iwm_ucode_tlv tlv;
504 	enum iwm_ucode_tlv_type tlv_type;
505 	const struct firmware *fwp;
506 	const uint8_t *data;
507 	uint32_t usniffer_img;
508 	uint32_t paging_mem_size;
509 	int num_of_cpus;
510 	int error = 0;
511 	size_t len;
512 
513 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
514 	    ucode_type != IWM_UCODE_INIT)
515 		return 0;
516 
517 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
518 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
519 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
520 
521 	if (fw->fw_fp != NULL)
522 		iwm_fw_info_free(fw);
523 
524 	/*
525 	 * Load firmware into driver memory.
526 	 * fw_fp will be set.
527 	 */
528 	IWM_UNLOCK(sc);
529 	fwp = firmware_get(sc->cfg->fw_name);
530 	IWM_LOCK(sc);
531 	if (fwp == NULL) {
532 		device_printf(sc->sc_dev,
533 		    "could not read firmware %s (error %d)\n",
534 		    sc->cfg->fw_name, error);
535 		goto out;
536 	}
537 	fw->fw_fp = fwp;
538 
539 	/* (Re-)Initialize default values. */
540 	sc->sc_capaflags = 0;
541 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
542 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
543 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
544 
545 	/*
546 	 * Parse firmware contents
547 	 */
548 
549 	uhdr = (const void *)fw->fw_fp->data;
550 	if (*(const uint32_t *)fw->fw_fp->data != 0
551 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
552 		device_printf(sc->sc_dev, "invalid firmware %s\n",
553 		    sc->cfg->fw_name);
554 		error = EINVAL;
555 		goto out;
556 	}
557 
558 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
559 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
560 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
561 	    IWM_UCODE_API(le32toh(uhdr->ver)));
562 	data = uhdr->data;
563 	len = fw->fw_fp->datasize - sizeof(*uhdr);
564 
565 	while (len >= sizeof(tlv)) {
566 		size_t tlv_len;
567 		const void *tlv_data;
568 
569 		memcpy(&tlv, data, sizeof(tlv));
570 		tlv_len = le32toh(tlv.length);
571 		tlv_type = le32toh(tlv.type);
572 
573 		len -= sizeof(tlv);
574 		data += sizeof(tlv);
575 		tlv_data = data;
576 
577 		if (len < tlv_len) {
578 			device_printf(sc->sc_dev,
579 			    "firmware too short: %zu bytes\n",
580 			    len);
581 			error = EINVAL;
582 			goto parse_out;
583 		}
584 
585 		switch ((int)tlv_type) {
586 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
587 			if (tlv_len < sizeof(uint32_t)) {
588 				device_printf(sc->sc_dev,
589 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
590 				    __func__,
591 				    (int) tlv_len);
592 				error = EINVAL;
593 				goto parse_out;
594 			}
595 			sc->sc_capa_max_probe_len
596 			    = le32toh(*(const uint32_t *)tlv_data);
597 			/* limit it to something sensible */
598 			if (sc->sc_capa_max_probe_len >
599 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
600 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
601 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
602 				    "ridiculous\n", __func__);
603 				error = EINVAL;
604 				goto parse_out;
605 			}
606 			break;
607 		case IWM_UCODE_TLV_PAN:
608 			if (tlv_len) {
609 				device_printf(sc->sc_dev,
610 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
611 				    __func__,
612 				    (int) tlv_len);
613 				error = EINVAL;
614 				goto parse_out;
615 			}
616 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
617 			break;
618 		case IWM_UCODE_TLV_FLAGS:
619 			if (tlv_len < sizeof(uint32_t)) {
620 				device_printf(sc->sc_dev,
621 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
622 				    __func__,
623 				    (int) tlv_len);
624 				error = EINVAL;
625 				goto parse_out;
626 			}
627 			/*
628 			 * Apparently there can be many flags, but Linux driver
629 			 * parses only the first one, and so do we.
630 			 *
631 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
632 			 * Intentional or a bug?  Observations from
633 			 * current firmware file:
634 			 *  1) TLV_PAN is parsed first
635 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
636 			 * ==> this resets TLV_PAN to itself... hnnnk
637 			 */
638 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
639 			break;
640 		case IWM_UCODE_TLV_CSCHEME:
641 			if ((error = iwm_store_cscheme(sc,
642 			    tlv_data, tlv_len)) != 0) {
643 				device_printf(sc->sc_dev,
644 				    "%s: iwm_store_cscheme(): returned %d\n",
645 				    __func__,
646 				    error);
647 				goto parse_out;
648 			}
649 			break;
650 		case IWM_UCODE_TLV_NUM_OF_CPU:
651 			if (tlv_len != sizeof(uint32_t)) {
652 				device_printf(sc->sc_dev,
653 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
654 				    __func__,
655 				    (int) tlv_len);
656 				error = EINVAL;
657 				goto parse_out;
658 			}
659 			num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
660 			if (num_of_cpus == 2) {
661 				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
662 					TRUE;
663 				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
664 					TRUE;
665 				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
666 					TRUE;
667 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
668 				device_printf(sc->sc_dev,
669 				    "%s: Driver supports only 1 or 2 CPUs\n",
670 				    __func__);
671 				error = EINVAL;
672 				goto parse_out;
673 			}
674 			break;
675 		case IWM_UCODE_TLV_SEC_RT:
676 			if ((error = iwm_firmware_store_section(sc,
677 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
678 				device_printf(sc->sc_dev,
679 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
680 				    __func__,
681 				    error);
682 				goto parse_out;
683 			}
684 			break;
685 		case IWM_UCODE_TLV_SEC_INIT:
686 			if ((error = iwm_firmware_store_section(sc,
687 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
688 				device_printf(sc->sc_dev,
689 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
690 				    __func__,
691 				    error);
692 				goto parse_out;
693 			}
694 			break;
695 		case IWM_UCODE_TLV_SEC_WOWLAN:
696 			if ((error = iwm_firmware_store_section(sc,
697 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
698 				device_printf(sc->sc_dev,
699 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
700 				    __func__,
701 				    error);
702 				goto parse_out;
703 			}
704 			break;
705 		case IWM_UCODE_TLV_DEF_CALIB:
706 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
707 				device_printf(sc->sc_dev,
708 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
709 				    __func__,
710 				    (int) tlv_len,
711 				    (int) sizeof(struct iwm_tlv_calib_data));
712 				error = EINVAL;
713 				goto parse_out;
714 			}
715 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
716 				device_printf(sc->sc_dev,
717 				    "%s: iwm_set_default_calib() failed: %d\n",
718 				    __func__,
719 				    error);
720 				goto parse_out;
721 			}
722 			break;
723 		case IWM_UCODE_TLV_PHY_SKU:
724 			if (tlv_len != sizeof(uint32_t)) {
725 				error = EINVAL;
726 				device_printf(sc->sc_dev,
727 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
728 				    __func__,
729 				    (int) tlv_len);
730 				goto parse_out;
731 			}
732 			sc->sc_fw.phy_config =
733 			    le32toh(*(const uint32_t *)tlv_data);
734 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
735 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
736 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
737 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
738 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
739 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
740 			break;
741 
742 		case IWM_UCODE_TLV_API_CHANGES_SET: {
743 			const struct iwm_ucode_api *api;
744 			if (tlv_len != sizeof(*api)) {
745 				error = EINVAL;
746 				goto parse_out;
747 			}
748 			api = (const struct iwm_ucode_api *)tlv_data;
749 			/* Flags may exceed 32 bits in future firmware. */
750 			if (le32toh(api->api_index) > 0) {
751 				device_printf(sc->sc_dev,
752 				    "unsupported API index %d\n",
753 				    le32toh(api->api_index));
754 				goto parse_out;
755 			}
756 			sc->sc_ucode_api = le32toh(api->api_flags);
757 			break;
758 		}
759 
760 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
761 			const struct iwm_ucode_capa *capa;
762 			int idx, i;
763 			if (tlv_len != sizeof(*capa)) {
764 				error = EINVAL;
765 				goto parse_out;
766 			}
767 			capa = (const struct iwm_ucode_capa *)tlv_data;
768 			idx = le32toh(capa->api_index);
769 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
770 				device_printf(sc->sc_dev,
771 				    "unsupported API index %d\n", idx);
772 				goto parse_out;
773 			}
774 			for (i = 0; i < 32; i++) {
775 				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
776 					continue;
777 				setbit(sc->sc_enabled_capa, i + (32 * idx));
778 			}
779 			break;
780 		}
781 
782 		case 48: /* undocumented TLV */
783 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
784 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
785 			/* ignore, not used by current driver */
786 			break;
787 
788 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
789 			if ((error = iwm_firmware_store_section(sc,
790 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
791 			    tlv_len)) != 0)
792 				goto parse_out;
793 			break;
794 
795 		case IWM_UCODE_TLV_PAGING:
796 			if (tlv_len != sizeof(uint32_t)) {
797 				error = EINVAL;
798 				goto parse_out;
799 			}
800 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
801 
802 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
803 			    "%s: Paging: paging enabled (size = %u bytes)\n",
804 			    __func__, paging_mem_size);
805 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
806 				device_printf(sc->sc_dev,
807 					"%s: Paging: driver supports up to %u bytes for paging image\n",
808 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
809 				error = EINVAL;
810 				goto out;
811 			}
812 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
813 				device_printf(sc->sc_dev,
814 				    "%s: Paging: image isn't multiple %u\n",
815 				    __func__, IWM_FW_PAGING_SIZE);
816 				error = EINVAL;
817 				goto out;
818 			}
819 
820 			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
821 			    paging_mem_size;
822 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
823 			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
824 			    paging_mem_size;
825 			break;
826 
827 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
828 			if (tlv_len != sizeof(uint32_t)) {
829 				error = EINVAL;
830 				goto parse_out;
831 			}
832 			sc->sc_capa_n_scan_channels =
833 			  le32toh(*(const uint32_t *)tlv_data);
834 			break;
835 
836 		case IWM_UCODE_TLV_FW_VERSION:
837 			if (tlv_len != sizeof(uint32_t) * 3) {
838 				error = EINVAL;
839 				goto parse_out;
840 			}
841 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
842 			    "%d.%d.%d",
843 			    le32toh(((const uint32_t *)tlv_data)[0]),
844 			    le32toh(((const uint32_t *)tlv_data)[1]),
845 			    le32toh(((const uint32_t *)tlv_data)[2]));
846 			break;
847 
848 		case IWM_UCODE_TLV_FW_MEM_SEG:
849 			break;
850 
851 		default:
852 			device_printf(sc->sc_dev,
853 			    "%s: unknown firmware section %d, abort\n",
854 			    __func__, tlv_type);
855 			error = EINVAL;
856 			goto parse_out;
857 		}
858 
859 		len -= roundup(tlv_len, 4);
860 		data += roundup(tlv_len, 4);
861 	}
862 
863 	KASSERT(error == 0, ("unhandled error"));
864 
865  parse_out:
866 	if (error) {
867 		device_printf(sc->sc_dev, "firmware parse error %d, "
868 		    "section type %d\n", error, tlv_type);
869 	}
870 
871 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
872 		device_printf(sc->sc_dev,
873 		    "device uses unsupported power ops\n");
874 		error = ENOTSUP;
875 	}
876 
877  out:
878 	if (error) {
879 		fw->fw_status = IWM_FW_STATUS_NONE;
880 		if (fw->fw_fp != NULL)
881 			iwm_fw_info_free(fw);
882 	} else
883 		fw->fw_status = IWM_FW_STATUS_DONE;
884 	wakeup(&sc->sc_fw);
885 
886 	return error;
887 }
888 
889 /*
890  * DMA resource routines
891  */
892 
893 /* fwmem is used to load firmware onto the card */
894 static int
895 iwm_alloc_fwmem(struct iwm_softc *sc)
896 {
897 	/* Must be aligned on a 16-byte boundary. */
898 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
899 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
900 }
901 
902 /* tx scheduler rings.  not used? */
903 static int
904 iwm_alloc_sched(struct iwm_softc *sc)
905 {
906 	/* TX scheduler rings must be aligned on a 1KB boundary. */
907 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
908 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
909 }
910 
911 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
912 static int
913 iwm_alloc_kw(struct iwm_softc *sc)
914 {
915 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
916 }
917 
918 /* interrupt cause table */
919 static int
920 iwm_alloc_ict(struct iwm_softc *sc)
921 {
922 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
923 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
924 }
925 
926 static int
927 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
928 {
929 	bus_size_t size;
930 	int i, error;
931 
932 	ring->cur = 0;
933 
934 	/* Allocate RX descriptors (256-byte aligned). */
935 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
936 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
937 	if (error != 0) {
938 		device_printf(sc->sc_dev,
939 		    "could not allocate RX ring DMA memory\n");
940 		goto fail;
941 	}
942 	ring->desc = ring->desc_dma.vaddr;
943 
944 	/* Allocate RX status area (16-byte aligned). */
945 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
946 	    sizeof(*ring->stat), 16);
947 	if (error != 0) {
948 		device_printf(sc->sc_dev,
949 		    "could not allocate RX status DMA memory\n");
950 		goto fail;
951 	}
952 	ring->stat = ring->stat_dma.vaddr;
953 
954         /* Create RX buffer DMA tag. */
955         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
956             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
957             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
958         if (error != 0) {
959                 device_printf(sc->sc_dev,
960                     "%s: could not create RX buf DMA tag, error %d\n",
961                     __func__, error);
962                 goto fail;
963         }
964 
965 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
966 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
967 	if (error != 0) {
968 		device_printf(sc->sc_dev,
969 		    "%s: could not create RX buf DMA map, error %d\n",
970 		    __func__, error);
971 		goto fail;
972 	}
973 	/*
974 	 * Allocate and map RX buffers.
975 	 */
976 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
977 		struct iwm_rx_data *data = &ring->data[i];
978 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
979 		if (error != 0) {
980 			device_printf(sc->sc_dev,
981 			    "%s: could not create RX buf DMA map, error %d\n",
982 			    __func__, error);
983 			goto fail;
984 		}
985 		data->m = NULL;
986 
987 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
988 			goto fail;
989 		}
990 	}
991 	return 0;
992 
993 fail:	iwm_free_rx_ring(sc, ring);
994 	return error;
995 }
996 
997 static void
998 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
999 {
1000 	/* Reset the ring state */
1001 	ring->cur = 0;
1002 
1003 	/*
1004 	 * The hw rx ring index in shared memory must also be cleared,
1005 	 * otherwise the discrepancy can cause reprocessing chaos.
1006 	 */
1007 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1008 }
1009 
1010 static void
1011 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1012 {
1013 	int i;
1014 
1015 	iwm_dma_contig_free(&ring->desc_dma);
1016 	iwm_dma_contig_free(&ring->stat_dma);
1017 
1018 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1019 		struct iwm_rx_data *data = &ring->data[i];
1020 
1021 		if (data->m != NULL) {
1022 			bus_dmamap_sync(ring->data_dmat, data->map,
1023 			    BUS_DMASYNC_POSTREAD);
1024 			bus_dmamap_unload(ring->data_dmat, data->map);
1025 			m_freem(data->m);
1026 			data->m = NULL;
1027 		}
1028 		if (data->map != NULL) {
1029 			bus_dmamap_destroy(ring->data_dmat, data->map);
1030 			data->map = NULL;
1031 		}
1032 	}
1033 	if (ring->spare_map != NULL) {
1034 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1035 		ring->spare_map = NULL;
1036 	}
1037 	if (ring->data_dmat != NULL) {
1038 		bus_dma_tag_destroy(ring->data_dmat);
1039 		ring->data_dmat = NULL;
1040 	}
1041 }
1042 
/*
 * Set up TX ring 'qid': the descriptor array, and — for rings up to
 * and including the command queue — the per-slot device-command
 * buffers, the buffer DMA tag and per-slot maps.  On failure the
 * partially built ring is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	/* One contiguous device-command buffer per slot; 4-byte aligned. */
	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute, for each slot, the bus address of its command
	 * buffer and of the scratch field inside the TX command, so
	 * they don't have to be recomputed on every transmit.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* We must have consumed exactly one command buffer per slot. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1122 
1123 static void
1124 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1125 {
1126 	int i;
1127 
1128 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1129 		struct iwm_tx_data *data = &ring->data[i];
1130 
1131 		if (data->m != NULL) {
1132 			bus_dmamap_sync(ring->data_dmat, data->map,
1133 			    BUS_DMASYNC_POSTWRITE);
1134 			bus_dmamap_unload(ring->data_dmat, data->map);
1135 			m_freem(data->m);
1136 			data->m = NULL;
1137 		}
1138 	}
1139 	/* Clear TX descriptors. */
1140 	memset(ring->desc, 0, ring->desc_dma.size);
1141 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1142 	    BUS_DMASYNC_PREWRITE);
1143 	sc->qfullmsk &= ~(1 << ring->qid);
1144 	ring->queued = 0;
1145 	ring->cur = 0;
1146 
1147 	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1148 		iwm_pcie_clear_cmd_in_flight(sc);
1149 }
1150 
1151 static void
1152 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1153 {
1154 	int i;
1155 
1156 	iwm_dma_contig_free(&ring->desc_dma);
1157 	iwm_dma_contig_free(&ring->cmd_dma);
1158 
1159 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1160 		struct iwm_tx_data *data = &ring->data[i];
1161 
1162 		if (data->m != NULL) {
1163 			bus_dmamap_sync(ring->data_dmat, data->map,
1164 			    BUS_DMASYNC_POSTWRITE);
1165 			bus_dmamap_unload(ring->data_dmat, data->map);
1166 			m_freem(data->m);
1167 			data->m = NULL;
1168 		}
1169 		if (data->map != NULL) {
1170 			bus_dmamap_destroy(ring->data_dmat, data->map);
1171 			data->map = NULL;
1172 		}
1173 	}
1174 	if (ring->data_dmat != NULL) {
1175 		bus_dma_tag_destroy(ring->data_dmat);
1176 		ring->data_dmat = NULL;
1177 	}
1178 }
1179 
1180 /*
1181  * High-level hardware frobbing routines
1182  */
1183 
1184 static void
1185 iwm_enable_interrupts(struct iwm_softc *sc)
1186 {
1187 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1188 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1189 }
1190 
1191 static void
1192 iwm_restore_interrupts(struct iwm_softc *sc)
1193 {
1194 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1195 }
1196 
1197 static void
1198 iwm_disable_interrupts(struct iwm_softc *sc)
1199 {
1200 	/* disable interrupts */
1201 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1202 
1203 	/* acknowledge all interrupts */
1204 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1205 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1206 }
1207 
/*
 * Reinitialize the ICT (interrupt cause table) and switch the driver
 * into ICT interrupt mode.  Interrupts are disabled for the duration
 * and re-enabled before returning.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1231 
1232 /* iwlwifi pcie/trans.c */
1233 
1234 /*
1235  * Since this .. hard-resets things, it's time to actually
1236  * mark the first vap (if any) as having no mac context.
1237  * It's annoying, but since the driver is potentially being
1238  * stop/start'ed whilst active (thanks openbsd port!) we
1239  * have to correctly track this.
1240  */
/*
 * Full device shutdown: quiesce interrupts and DMA, reset the rings,
 * power down the APM and the on-board processor.  The ordering below
 * follows the iwlwifi pcie/trans.c stop sequence — do not reorder.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Accumulate the per-channel idle bit to poll on. */
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1323 
1324 /* iwlwifi: mvm/ops.c */
1325 static void
1326 iwm_mvm_nic_config(struct iwm_softc *sc)
1327 {
1328 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1329 	uint32_t reg_val = 0;
1330 	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1331 
1332 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1333 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1334 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1335 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1336 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1337 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1338 
1339 	/* SKU control */
1340 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1341 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1342 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1343 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1344 
1345 	/* radio configuration */
1346 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1347 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1348 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1349 
1350 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1351 
1352 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1353 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1354 	    radio_cfg_step, radio_cfg_dash);
1355 
1356 	/*
1357 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1358 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1359 	 * to lose ownership and not being able to obtain it back.
1360 	 */
1361 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1362 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1363 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1364 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1365 	}
1366 }
1367 
/*
 * Program the RX DMA engine: clear the shared status area, reset the
 * hardware pointers, hand the descriptor ring and status area to the
 * card, and enable the RX channel.  Returns 0 on success or EBUSY if
 * the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1424 
/*
 * Program the TX side: deactivate the scheduler, set the "keep warm"
 * page address, and hand every TX ring's descriptor base to the
 * hardware.  Returns 0 on success or EBUSY if the NIC lock is busy.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1459 
1460 static int
1461 iwm_nic_init(struct iwm_softc *sc)
1462 {
1463 	int error;
1464 
1465 	iwm_apm_init(sc);
1466 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1467 		iwm_set_pwr(sc);
1468 
1469 	iwm_mvm_nic_config(sc);
1470 
1471 	if ((error = iwm_nic_rx_init(sc)) != 0)
1472 		return error;
1473 
1474 	/*
1475 	 * Ditto for TX, from iwn
1476 	 */
1477 	if ((error = iwm_nic_tx_init(sc)) != 0)
1478 		return error;
1479 
1480 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1481 	    "%s: shadow registers enabled\n", __func__);
1482 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1483 
1484 	return 0;
1485 }
1486 
/*
 * Access-category to firmware TX FIFO mapping.
 * NOTE(review): the index is presumably a net80211 WME access
 * category; confirm the VO/VI/BE/BK ordering against the callers.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1493 
/*
 * Activate TX queue 'qid' and bind it to firmware FIFO 'fifo'.
 * The command queue is programmed directly through scheduler
 * registers and SRAM; all other queues are configured with the
 * IWM_SCD_QUEUE_CFG firmware command.  Returns 0 on success,
 * EBUSY when the NIC lock cannot be taken, or a command errno.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the hardware write pointer for this queue. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Take the queue out of aggregation mode. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Mark the queue active and bound to its FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the raw queue number into the register
	 * rather than a (1 << qid) bit mask; verify against iwlwifi's
	 * scheduler-enable code whether a bit mask was intended.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1581 
/*
 * Post-"alive" transport setup: reset the ICT table, sanity-check the
 * scheduler SRAM base address reported by the firmware, clear the
 * scheduler context/status/translation memory, enable the command
 * queue and the FH TX DMA channels.  Returns 0 on success or an errno.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	/* Warn (but continue) if firmware and PRPH disagree on the base. */
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate the TX scheduler for all queues. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
1652 
1653 /*
1654  * NVM read access and content parsing.  We do not support
1655  * external NVM or writing NVM.
1656  * iwlwifi/mvm/nvm.c
1657  */
1658 
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* Opcodes for the IWM_NVM_ACCESS_CMD firmware command. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	/* Read hit an empty 2K-aligned address; not fatal past offset 0. */
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1670 
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.  On success the payload is copied to data + offset and *len
 * is set to the number of bytes read; *len == 0 signals an empty chunk
 * address, which is not an error once offset != 0.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* The firmware must echo back the offset we asked for. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	/* Never copy more than the caller's buffer slot can hold. */
	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1757 
1758 /*
1759  * Reads an NVM section completely.
1760  * NICs prior to 7000 family don't have a real NVM, but just read
1761  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1762  * by uCode, we need to manually check in this case that we don't
1763  * overflow and try to read more than the EEPROM size.
1764  * For 7000 family NICs, we supply the maximal size we can read, and
1765  * the uCode fills the response with as much data as we can,
1766  * without overflowing, so no check is needed.
1767  */
1768 static int
1769 iwm_nvm_read_section(struct iwm_softc *sc,
1770 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1771 {
1772 	uint16_t seglen, length, offset = 0;
1773 	int ret;
1774 
1775 	/* Set nvm section read length */
1776 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1777 
1778 	seglen = length;
1779 
1780 	/* Read the NVM until exhausted (reading less than requested) */
1781 	while (seglen == length) {
1782 		/* Check no memory assumptions fail and cause an overflow */
1783 		if ((size_read + offset + length) >
1784 		    sc->cfg->eeprom_size) {
1785 			device_printf(sc->sc_dev,
1786 			    "EEPROM size is too small for NVM\n");
1787 			return ENOBUFS;
1788 		}
1789 
1790 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1791 		if (ret) {
1792 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1793 				    "Cannot read NVM from section %d offset %d, length %d\n",
1794 				    section, offset, length);
1795 			return ret;
1796 		}
1797 		offset += seglen;
1798 	}
1799 
1800 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1801 		    "NVM section %d read completed\n", section);
1802 	*len = offset;
1803 	return 0;
1804 }
1805 
1806 /*
1807  * BEGIN IWM_NVM_PARSE
1808  */
1809 
1810 /* iwlwifi/iwl-nvm-parse.c */
1811 
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	/* First channel word, relative to the start of the SW section. */
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1829 
/* NVM offsets (in words) used by 8000-series NICs. */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1855 
/* SKU Capabilities (actual values from NVM definition) */
/* Bits of the NVM SKU word (IWM_SKU / IWM_SKU_8000). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1863 
/* radio config bits (actual values from NVM definition) */
/* Field extractors for the radio-config word (pre-8000 layout). */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Wider radio-config word layout used by 8000-series parts. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

/* Fallback TX power limit; presumably dBm — TODO confirm in callers. */
#define DEFAULT_MAX_TX_POWER 16
1880 
1881 /**
1882  * enum iwm_nvm_channel_flags - channel flags in NVM
1883  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1884  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1885  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1886  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1887  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1888  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1889  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1890  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1891  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1892  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1893  */
1894 enum iwm_nvm_channel_flags {
1895 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1896 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1897 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1898 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1899 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1900 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1901 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1902 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1903 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1904 };
1905 
1906 /*
1907  * Translate EEPROM flags to net80211.
1908  */
1909 static uint32_t
1910 iwm_eeprom_channel_flags(uint16_t ch_flags)
1911 {
1912 	uint32_t nflags;
1913 
1914 	nflags = 0;
1915 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1916 		nflags |= IEEE80211_CHAN_PASSIVE;
1917 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1918 		nflags |= IEEE80211_CHAN_NOADHOC;
1919 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1920 		nflags |= IEEE80211_CHAN_DFS;
1921 		/* Just in case. */
1922 		nflags |= IEEE80211_CHAN_NOADHOC;
1923 	}
1924 
1925 	return (nflags);
1926 }
1927 
/*
 * Append the NVM channels in table indices [ch_idx, ch_num) that carry
 * the VALID flag to the net80211 channel array chans[]/nchans, using
 * the mode bitmap in bands[].
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* Map the table index to the IEEE channel number. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		/* Channels without VALID may not be used at all for this SKU. */
		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		/* Stop on error (e.g. the channel array is full). */
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
1968 
/*
 * Build the net80211 channel list from the NVM channel-flags table:
 * 2 GHz channels 1-13 as 11b/g, channel 14 as 11b only, and — when the
 * SKU enables the 5 GHz band — the remaining table entries as 11a.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* The channel table size differs per device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2001 
/*
 * Determine the MAC address on family-8000 devices: prefer the
 * MAC_OVERRIDE NVM section; if that is absent or invalid, fall back to
 * the WFMP prph registers; otherwise zero the address (invalid).
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVM images ship with. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The registers hold the bytes reversed; undo that here. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	/* No source available — leave a zeroed (invalid) address behind. */
	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2060 
2061 static int
2062 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2063 	    const uint16_t *phy_sku)
2064 {
2065 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2066 		return le16_to_cpup(nvm_sw + IWM_SKU);
2067 
2068 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2069 }
2070 
2071 static int
2072 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2073 {
2074 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2075 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2076 	else
2077 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2078 						IWM_NVM_VERSION_8000));
2079 }
2080 
2081 static int
2082 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2083 		  const uint16_t *phy_sku)
2084 {
2085         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2086                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2087 
2088         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2089 }
2090 
2091 static int
2092 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2093 {
2094 	int n_hw_addr;
2095 
2096 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2097 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2098 
2099 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2100 
2101         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2102 }
2103 
2104 static void
2105 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2106 		  uint32_t radio_cfg)
2107 {
2108 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2109 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2110 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2111 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2112 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2113 		return;
2114 	}
2115 
2116 	/* set the radio configuration for family 8000 */
2117 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2118 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2119 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2120 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2121 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2122 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2123 }
2124 
/*
 * Fill data->hw_addr from the NVM: pre-8000 devices read it directly
 * from the HW section (with 16-bit LE byte order), 8000-family devices
 * go through iwm_set_hw_address_family_8000().  Returns EINVAL when
 * the resulting address fails iwm_is_valid_ether_addr().
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2155 
2156 static struct iwm_nvm_data *
2157 iwm_parse_nvm_data(struct iwm_softc *sc,
2158 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2159 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2160 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2161 {
2162 	struct iwm_nvm_data *data;
2163 	uint32_t sku, radio_cfg;
2164 
2165 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2166 		data = malloc(sizeof(*data) +
2167 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2168 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2169 	} else {
2170 		data = malloc(sizeof(*data) +
2171 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2172 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2173 	}
2174 	if (!data)
2175 		return NULL;
2176 
2177 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2178 
2179 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2180 	iwm_set_radio_cfg(sc, data, radio_cfg);
2181 
2182 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2183 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2184 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2185 	data->sku_cap_11n_enable = 0;
2186 
2187 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2188 
2189 	/* If no valid mac address was found - bail out */
2190 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2191 		free(data, M_DEVBUF);
2192 		return NULL;
2193 	}
2194 
2195 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2196 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2197 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2198 	} else {
2199 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2200 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2201 	}
2202 
2203 	return data;
2204 }
2205 
/*
 * Release an NVM data blob allocated by iwm_parse_nvm_data().
 * NULL-safe wrapper around free(9).
 */
static void
iwm_free_nvm_data(struct iwm_nvm_data *data)
{
	if (data != NULL)
		free(data, M_DEVBUF);
}
2212 
/*
 * Verify that the NVM sections mandatory for this device family were
 * successfully read, then pass them to iwm_parse_nvm_data().
 * Returns NULL if a required section is missing.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	/* Sections not checked above may legitimately be NULL here. */
	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2265 
2266 static int
2267 iwm_nvm_init(struct iwm_softc *sc)
2268 {
2269 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2270 	int i, ret, section;
2271 	uint32_t size_read = 0;
2272 	uint8_t *nvm_buffer, *temp;
2273 	uint16_t len;
2274 
2275 	memset(nvm_sections, 0, sizeof(nvm_sections));
2276 
2277 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2278 		return EINVAL;
2279 
2280 	/* load NVM values from nic */
2281 	/* Read From FW NVM */
2282 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2283 
2284 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2285 	if (!nvm_buffer)
2286 		return ENOMEM;
2287 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2288 		/* we override the constness for initial read */
2289 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2290 					   &len, size_read);
2291 		if (ret)
2292 			continue;
2293 		size_read += len;
2294 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2295 		if (!temp) {
2296 			ret = ENOMEM;
2297 			break;
2298 		}
2299 		memcpy(temp, nvm_buffer, len);
2300 
2301 		nvm_sections[section].data = temp;
2302 		nvm_sections[section].length = len;
2303 	}
2304 	if (!size_read)
2305 		device_printf(sc->sc_dev, "OTP is blank\n");
2306 	free(nvm_buffer, M_DEVBUF);
2307 
2308 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2309 	if (!sc->nvm_data)
2310 		return EINVAL;
2311 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2312 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2313 
2314 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2315 		if (nvm_sections[i].data != NULL)
2316 			free(nvm_sections[i].data, M_DEVBUF);
2317 	}
2318 
2319 	return 0;
2320 }
2321 
/*
 * Load one firmware section into device SRAM, pushing it chunk by
 * chunk (at most IWM_FH_MEM_TB_MAX_LENGTH bytes each) through the
 * sc->fw_dma bounce buffer.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		/* Destinations in the extended window need a chicken bit. */
		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it over. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Clear the chicken bit again regardless of the result. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2374 
2375 /*
2376  * ucode
2377  */
2378 static int
2379 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2380 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2381 {
2382 	int ret;
2383 
2384 	sc->sc_fw_chunk_done = 0;
2385 
2386 	if (!iwm_nic_lock(sc))
2387 		return EBUSY;
2388 
2389 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2390 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2391 
2392 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2393 	    dst_addr);
2394 
2395 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2396 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2397 
2398 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2399 	    (iwm_get_dma_hi_addr(phy_addr)
2400 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2401 
2402 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2403 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2404 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2405 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2406 
2407 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2408 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2409 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2410 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2411 
2412 	iwm_nic_unlock(sc);
2413 
2414 	/* wait up to 5s for this segment to load */
2415 	ret = 0;
2416 	while (!sc->sc_fw_chunk_done) {
2417 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2418 		if (ret)
2419 			break;
2420 	}
2421 
2422 	if (ret != 0) {
2423 		device_printf(sc->sc_dev,
2424 		    "fw chunk addr 0x%x len %d failed to load\n",
2425 		    dst_addr, byte_cnt);
2426 		return ETIMEDOUT;
2427 	}
2428 
2429 	return 0;
2430 }
2431 
/*
 * Load the firmware sections belonging to one CPU on 8000-family
 * devices, reporting each loaded section to the ucode through
 * IWM_FH_UCODE_LOAD_STATUS.  CPU 1 uses the low 16 bits of the status
 * register, CPU 2 the high 16 (shift_param).  *first_ucode_section is
 * updated so the CPU 2 pass resumes after the separator.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Running ones mask: 1, 3, 7, ... per section. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's half of the load-status register all-done. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2493 
/*
 * Load the firmware sections belonging to one CPU (pre-8000 load
 * protocol).  *first_ucode_section is updated so a subsequent CPU 2
 * pass resumes after the separator section.
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	/* CPU 1 flags go in the low 16 status bits, CPU 2 in the high 16. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	/* Report load completion for this CPU to 8000-family ucode. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;

}
2546 
/*
 * Load a complete (possibly dual-CPU) firmware image using the pre-8000
 * protocol, then release the CPU reset to start it running.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
                iwm_write_prph(sc,
			       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2582 
/*
 * Load a complete firmware image on 8000-family devices.  Unlike the
 * pre-8000 path, the CPU reset is released first so the ucode is ready
 * to receive the secured image sections.
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2607 
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is required to observe
 * firmware-chunk DMA completions while the image is being loaded.
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2616 
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, initialize the NIC, and load the given
 * firmware image into it.  Returns EIO if the hardware cannot be
 * claimed (e.g. AMT owns it), or the error from NIC init / image load.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Acknowledge any pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2675 
2676 static int
2677 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2678 {
2679 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2680 		.valid = htole32(valid_tx_ant),
2681 	};
2682 
2683 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2684 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2685 }
2686 
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the calibration trigger settings for
 * the currently loaded ucode type to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	/* Calibration triggers depend on which ucode image is running. */
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2706 
/*
 * Notification-wait callback for the ALIVE message.  The response size
 * selects between three layout versions; each branch records the
 * error/log event table pointers and the SCD base address, and sets
 * alive_data->valid when the ucode reported IWM_ALIVE_STATUS_OK.
 * Always returns TRUE so the wait completes on the first ALIVE.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	/* Version 1 layout: no UMAC information at all. */
	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	/* Version 2 layout: adds the UMAC error info address. */
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	/* Version 3 layout: UMAC version fields widen to 32 bits. */
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2783 
2784 static int
2785 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2786 	struct iwm_rx_packet *pkt, void *data)
2787 {
2788 	struct iwm_phy_db *phy_db = data;
2789 
2790 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2791 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2792 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2793 			    __func__, pkt->hdr.code);
2794 		}
2795 		return TRUE;
2796 	}
2797 
2798 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2799 		device_printf(sc->sc_dev,
2800 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2801 	}
2802 
2803 	return FALSE;
2804 }
2805 
/*
 * Load the requested firmware image and wait for the firmware's ALIVE
 * notification.  On success, sc->cur_ucode and sc->ucode_loaded reflect
 * the newly running image; on failure, sc->cur_ucode is restored to the
 * previous type.  Called with the IWM lock held; the lock is dropped
 * while sleeping for the notification.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	memset(&alive_data, 0, sizeof(alive_data));
	/* Register for the ALIVE notification before starting the fw. */
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* Dump secure-boot state to help diagnose 8000-series hangs. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	/* Tell the transport layer the fw is up; programs the TX scheduler. */
	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	/* error is necessarily 0 here; kept for symmetry with the error paths. */
	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2894 
2895 /*
2896  * mvm misc bits
2897  */
2898 
2899 /*
2900  * follows iwlwifi/fw.c
2901  */
/*
 * Boot the INIT firmware image and run its calibration flow, or (with
 * justnvm set) boot it only far enough to read the NVM and the MAC
 * address.  Returns 0 on success or an errno; on any exit before the
 * final wait, the pending notification wait is removed.  Called with
 * the IWM lock held; the lock is dropped while sleeping.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Collect calibration results into the PHY db as they arrive. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* NB: success path too — 'error' just drops the notif wait. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	/* The wait consumed the notification entry; skip the removal. */
	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
2994 
2995 /*
2996  * receive side
2997  */
2998 
2999 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * (Re)fill RX ring slot 'idx' with a freshly allocated cluster mbuf.
 *
 * The mbuf is first loaded into the ring's spare DMA map; only on
 * success are the spare map and the slot's live map swapped, so the
 * currently mapped buffer is never lost on failure.  Returns 0 on
 * success or an errno (the slot is left untouched on failure).
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/* Map into the spare map first; don't disturb the live mapping. */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The descriptor holds the DMA address in 256-byte units. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3045 
3046 /* iwlwifi: mvm/rx.c */
3047 #define IWM_RSSI_OFFSET 50
3048 static int
3049 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3050 {
3051 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3052 	uint32_t agc_a, agc_b;
3053 	uint32_t val;
3054 
3055 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3056 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3057 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3058 
3059 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3060 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3061 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3062 
3063 	/*
3064 	 * dBm = rssi dB - agc dB - constant.
3065 	 * Higher AGC (higher radio gain) means lower signal.
3066 	 */
3067 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3068 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3069 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3070 
3071 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3072 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3073 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3074 
3075 	return max_rssi_dbm;
3076 }
3077 
3078 /* iwlwifi: mvm/rx.c */
3079 /*
3080  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3081  * values are reported by the fw as positive values - need to negate
3082  * to obtain their dBM.  Account for missing antennas by replacing 0
3083  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3084  */
3085 static int
3086 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3087 {
3088 	int energy_a, energy_b, energy_c, max_energy;
3089 	uint32_t val;
3090 
3091 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3092 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3093 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3094 	energy_a = energy_a ? -energy_a : -256;
3095 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3096 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3097 	energy_b = energy_b ? -energy_b : -256;
3098 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3099 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3100 	energy_c = energy_c ? -energy_c : -256;
3101 	max_energy = MAX(energy_a, energy_b);
3102 	max_energy = MAX(max_energy, energy_c);
3103 
3104 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3105 	    "energy In A %d B %d C %d , and max %d\n",
3106 	    energy_a, energy_b, energy_c, max_energy);
3107 
3108 	return max_energy;
3109 }
3110 
/*
 * Handle an IWM_RX_PHY_CMD notification: stash the PHY info so the
 * following MPDU notification (iwm_mvm_rx_rx_mpdu) can use it.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3122 
3123 /*
3124  * Retrieve the average noise (in dBm) among receivers.
3125  */
3126 static int
3127 iwm_get_noise(struct iwm_softc *sc,
3128     const struct iwm_mvm_statistics_rx_non_phy *stats)
3129 {
3130 	int i, total, nbant, noise;
3131 
3132 	total = nbant = noise = 0;
3133 	for (i = 0; i < 3; i++) {
3134 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3135 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3136 		    __func__,
3137 		    i,
3138 		    noise);
3139 
3140 		if (noise) {
3141 			total += noise;
3142 			nbant++;
3143 		}
3144 	}
3145 
3146 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3147 	    __func__, nbant, total);
3148 #if 0
3149 	/* There should be at least one antenna but check anyway. */
3150 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3151 #else
3152 	/* For now, just hard-code it to -96 to be safe */
3153 	return (-96);
3154 #endif
3155 }
3156 
3157 /*
3158  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3159  *
3160  * Handles the actual data of the Rx packet from the fw
3161  */
3162 static void
3163 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3164 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3165 {
3166 	struct ieee80211com *ic = &sc->sc_ic;
3167 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3168 	struct ieee80211_frame *wh;
3169 	struct ieee80211_node *ni;
3170 	struct ieee80211_rx_stats rxs;
3171 	struct mbuf *m;
3172 	struct iwm_rx_phy_info *phy_info;
3173 	struct iwm_rx_mpdu_res_start *rx_res;
3174 	uint32_t len;
3175 	uint32_t rx_pkt_status;
3176 	int rssi;
3177 
3178 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3179 
3180 	phy_info = &sc->sc_last_phy_info;
3181 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3182 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3183 	len = le16toh(rx_res->byte_count);
3184 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3185 
3186 	m = data->m;
3187 	m->m_data = pkt->data + sizeof(*rx_res);
3188 	m->m_pkthdr.len = m->m_len = len;
3189 
3190 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3191 		device_printf(sc->sc_dev,
3192 		    "dsp size out of range [0,20]: %d\n",
3193 		    phy_info->cfg_phy_cnt);
3194 		goto fail;
3195 	}
3196 
3197 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3198 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3199 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3200 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3201 		goto fail;
3202 	}
3203 
3204 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3205 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3206 	} else {
3207 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
3208 	}
3209 
3210 	/* Note: RSSI is absolute (ie a -ve value) */
3211 	if (rssi < IWM_MIN_DBM)
3212 		rssi = IWM_MIN_DBM;
3213 	else if (rssi > IWM_MAX_DBM)
3214 		rssi = IWM_MAX_DBM;
3215 
3216 	/* Map it to relative value */
3217 	rssi = rssi - sc->sc_noise;
3218 
3219 	/* replenish ring for the buffer we're going to feed to the sharks */
3220 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3221 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3222 		    __func__);
3223 		goto fail;
3224 	}
3225 
3226 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3227 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3228 
3229 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3230 
3231 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3232 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3233 	    __func__,
3234 	    le16toh(phy_info->channel),
3235 	    le16toh(phy_info->phy_flags));
3236 
3237 	/*
3238 	 * Populate an RX state struct with the provided information.
3239 	 */
3240 	bzero(&rxs, sizeof(rxs));
3241 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3242 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3243 	rxs.c_ieee = le16toh(phy_info->channel);
3244 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3245 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3246 	} else {
3247 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3248 	}
3249 
3250 	/* rssi is in 1/2db units */
3251 	rxs.c_rssi = rssi * 2;
3252 	rxs.c_nf = sc->sc_noise;
3253 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3254 		if (ni)
3255 			ieee80211_free_node(ni);
3256 		goto fail;
3257 	}
3258 
3259 	if (ieee80211_radiotap_active_vap(vap)) {
3260 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3261 
3262 		tap->wr_flags = 0;
3263 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3264 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3265 		tap->wr_chan_freq = htole16(rxs.c_freq);
3266 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3267 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3268 		tap->wr_dbm_antsignal = (int8_t)rssi;
3269 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3270 		tap->wr_tsft = phy_info->system_timestamp;
3271 		switch (phy_info->rate) {
3272 		/* CCK rates. */
3273 		case  10: tap->wr_rate =   2; break;
3274 		case  20: tap->wr_rate =   4; break;
3275 		case  55: tap->wr_rate =  11; break;
3276 		case 110: tap->wr_rate =  22; break;
3277 		/* OFDM rates. */
3278 		case 0xd: tap->wr_rate =  12; break;
3279 		case 0xf: tap->wr_rate =  18; break;
3280 		case 0x5: tap->wr_rate =  24; break;
3281 		case 0x7: tap->wr_rate =  36; break;
3282 		case 0x9: tap->wr_rate =  48; break;
3283 		case 0xb: tap->wr_rate =  72; break;
3284 		case 0x1: tap->wr_rate =  96; break;
3285 		case 0x3: tap->wr_rate = 108; break;
3286 		/* Unknown rate: should not happen. */
3287 		default:  tap->wr_rate =   0;
3288 		}
3289 	}
3290 
3291 	IWM_UNLOCK(sc);
3292 	if (ni != NULL) {
3293 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3294 		ieee80211_input_mimo(ni, m);
3295 		ieee80211_free_node(ni);
3296 	} else {
3297 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3298 		ieee80211_input_mimo_all(ic, m);
3299 	}
3300 	IWM_LOCK(sc);
3301 
3302 	return;
3303 
3304 fail:
3305 	counter_u64_add(ic->ic_ierrors, 1);
3306 }
3307 
3308 static int
3309 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3310 	struct iwm_node *in)
3311 {
3312 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3313 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3314 	struct ieee80211_node *ni = &in->in_ni;
3315 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3316 
3317 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3318 
3319 	/* Update rate control statistics. */
3320 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3321 	    __func__,
3322 	    (int) le16toh(tx_resp->status.status),
3323 	    (int) le16toh(tx_resp->status.sequence),
3324 	    tx_resp->frame_count,
3325 	    tx_resp->bt_kill_count,
3326 	    tx_resp->failure_rts,
3327 	    tx_resp->failure_frame,
3328 	    le32toh(tx_resp->initial_rate),
3329 	    (int) le16toh(tx_resp->wireless_media_time));
3330 
3331 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3332 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3333 	txs->short_retries = tx_resp->failure_rts;
3334 	txs->long_retries = tx_resp->failure_frame;
3335 	if (status != IWM_TX_STATUS_SUCCESS &&
3336 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3337 		switch (status) {
3338 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3339 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3340 			break;
3341 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3342 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3343 			break;
3344 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3345 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3346 			break;
3347 		default:
3348 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3349 			break;
3350 		}
3351 	} else {
3352 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3353 	}
3354 	ieee80211_ratectl_tx_complete(ni, txs);
3355 
3356 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3357 }
3358 
/*
 * Handle a TX command response: complete the matching TX ring entry,
 * release its DMA mapping, hand the mbuf back to net80211, and resume
 * transmission if the queue drains below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Firmware responded; reset the watchdog countdown. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Hands the mbuf (and node reference) back to net80211. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		/* All queues have room again; restart transmit. */
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3401 
3402 /*
3403  * transmit side
3404  */
3405 
3406 /*
3407  * Process a "command done" firmware notification.  This is where we wakeup
3408  * processes waiting for a synchronous command completion.
3409  * from if_iwn
3410  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Sanity check: command acks should arrive in submission order. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* No commands outstanding: allow the device to sleep again. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3450 
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * NOTE(review): compiled out ('#if 0' above).  Kept as a reference for
 * updating the scheduler byte-count table when block ack is enabled.
 * Writes sta_id plus the (possibly dword-scaled) frame length into the
 * byte-count table slot for (qid, idx), mirroring it into the duplicate
 * region the hardware also reads for low indices.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3483 
3484 /*
3485  * Take an 802.11 (non-n) rate, find the relevant rate
3486  * table entry.  return the index into in_ridx[].
3487  *
3488  * The caller then uses that index back into in_ridx
3489  * to figure out the rate index programmed /into/
3490  * the firmware for this given node.
3491  */
3492 static int
3493 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3494     uint8_t rate)
3495 {
3496 	int i;
3497 	uint8_t r;
3498 
3499 	for (i = 0; i < nitems(in->in_ridx); i++) {
3500 		r = iwm_rates[in->in_ridx[i]].rate;
3501 		if (rate == r)
3502 			return (i);
3503 	}
3504 
3505 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3506 	    "%s: couldn't find an entry for rate=%d\n",
3507 	    __func__,
3508 	    rate);
3509 
3510 	/* XXX Return the first */
3511 	/* XXX TODO: have it return the /lowest/ */
3512 	return (0);
3513 }
3514 
3515 static int
3516 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3517 {
3518 	int i;
3519 
3520 	for (i = 0; i < nitems(iwm_rates); i++) {
3521 		if (iwm_rates[i].rate == rate)
3522 			return (i);
3523 	}
3524 	/* XXX error? */
3525 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3526 	    "%s: couldn't find an entry for rate=%d\n",
3527 	    __func__,
3528 	    rate);
3529 	return (0);
3530 }
3531 
3532 /*
3533  * Fill in the rate related information for a transmit command.
3534  */
/*
 * Choose the TX rate for a frame and program the rate-related fields of
 * the TX command (retry limits, initial rate index, rate_n_flags).
 * Management/control/EAPOL frames use the fixed mgmt rate, multicast
 * frames the mcast rate, fixed-rate configurations the ucast rate;
 * other data frames consult the rate-control table programmed into the
 * firmware.  Returns the selected iwm_rate table entry.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (type == IEEE80211_FC0_TYPE_MGT ||
	    type == IEEE80211_FC0_TYPE_CTL ||
	    (m->m_flags & M_EAPOL) != 0) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Let the firmware walk down the programmed rate table. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
	        __func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3605 
3606 #define TB0_SIZE 16
/*
 * Encapsulate and queue mbuf 'm' for node 'ni' on TX ring 'ac': build
 * the firmware TX command (rate selection, flags, station id, copied
 * 802.11 header), DMA-map the payload, fill the TFD and advance the
 * ring's write pointer.  Consumes the mbuf on error.  Returns 0 or an
 * errno.  Called with the IWM lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* (It was copied into the TX command above; only payload remains.) */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0+1 cover the command header, TX command and 802.11 header. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3823 
3824 static int
3825 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3826     const struct ieee80211_bpf_params *params)
3827 {
3828 	struct ieee80211com *ic = ni->ni_ic;
3829 	struct iwm_softc *sc = ic->ic_softc;
3830 	int error = 0;
3831 
3832 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3833 	    "->%s begin\n", __func__);
3834 
3835 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3836 		m_freem(m);
3837 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3838 		    "<-%s not RUNNING\n", __func__);
3839 		return (ENETDOWN);
3840         }
3841 
3842 	IWM_LOCK(sc);
3843 	/* XXX fix this */
3844         if (params == NULL) {
3845 		error = iwm_tx(sc, m, ni, 0);
3846 	} else {
3847 		error = iwm_tx(sc, m, ni, 0);
3848 	}
3849 	sc->sc_tx_timer = 5;
3850 	IWM_UNLOCK(sc);
3851 
3852         return (error);
3853 }
3854 
3855 /*
3856  * mvm/tx.c
3857  */
3858 
3859 /*
3860  * Note that there are transports that buffer frames before they reach
3861  * the firmware. This means that after flush_tx_path is called, the
3862  * queue might not be empty. The race-free way to handle this is to:
3863  * 1) set the station as draining
3864  * 2) flush the Tx path
3865  * 3) wait for the transport queues to be empty
3866  */
3867 int
3868 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3869 {
3870 	int ret;
3871 	struct iwm_tx_path_flush_cmd flush_cmd = {
3872 		.queues_ctl = htole32(tfd_msk),
3873 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3874 	};
3875 
3876 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3877 	    sizeof(flush_cmd), &flush_cmd);
3878 	if (ret)
3879                 device_printf(sc->sc_dev,
3880 		    "Flushing tx queue failed: %d\n", ret);
3881 	return ret;
3882 }
3883 
3884 /*
3885  * BEGIN mvm/sta.c
3886  */
3887 
3888 static int
3889 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3890 	struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3891 {
3892 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3893 	    cmd, status);
3894 }
3895 
3896 /* send station add/update command to firmware */
3897 static int
3898 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3899 {
3900 	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3901 	int ret;
3902 	uint32_t status;
3903 
3904 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3905 
3906 	add_sta_cmd.sta_id = IWM_STATION_ID;
3907 	add_sta_cmd.mac_id_n_color
3908 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3909 	        IWM_DEFAULT_COLOR));
3910 	if (!update) {
3911 		int ac;
3912 		for (ac = 0; ac < WME_NUM_AC; ac++) {
3913 			add_sta_cmd.tfd_queue_msk |=
3914 			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3915 		}
3916 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3917 	}
3918 	add_sta_cmd.add_modify = update ? 1 : 0;
3919 	add_sta_cmd.station_flags_msk
3920 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3921 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3922 	if (update)
3923 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3924 
3925 	status = IWM_ADD_STA_SUCCESS;
3926 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3927 	if (ret)
3928 		return ret;
3929 
3930 	switch (status) {
3931 	case IWM_ADD_STA_SUCCESS:
3932 		break;
3933 	default:
3934 		ret = EIO;
3935 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3936 		break;
3937 	}
3938 
3939 	return ret;
3940 }
3941 
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* update == 0: create the station entry in the firmware. */
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
3947 
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* update == 1: modify the existing firmware station entry. */
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
3953 
3954 static int
3955 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3956 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3957 {
3958 	struct iwm_mvm_add_sta_cmd_v7 cmd;
3959 	int ret;
3960 	uint32_t status;
3961 
3962 	memset(&cmd, 0, sizeof(cmd));
3963 	cmd.sta_id = sta->sta_id;
3964 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3965 
3966 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3967 	cmd.tid_disable_tx = htole16(0xffff);
3968 
3969 	if (addr)
3970 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3971 
3972 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3973 	if (ret)
3974 		return ret;
3975 
3976 	switch (status) {
3977 	case IWM_ADD_STA_SUCCESS:
3978 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3979 		    "%s: Internal station added.\n", __func__);
3980 		return 0;
3981 	default:
3982 		device_printf(sc->sc_dev,
3983 		    "%s: Add internal station failed, status=0x%x\n",
3984 		    __func__, status);
3985 		ret = EIO;
3986 		break;
3987 	}
3988 	return ret;
3989 }
3990 
3991 static int
3992 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3993 {
3994 	int ret;
3995 
3996 	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3997 	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3998 
3999 	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4000 	if (ret)
4001 		return ret;
4002 
4003 	ret = iwm_mvm_add_int_sta_common(sc,
4004 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4005 
4006 	if (ret)
4007 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4008 	return ret;
4009 }
4010 
4011 /*
4012  * END mvm/sta.c
4013  */
4014 
4015 /*
4016  * BEGIN mvm/quota.c
4017  */
4018 
4019 static int
4020 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4021 {
4022 	struct iwm_time_quota_cmd cmd;
4023 	int i, idx, ret, num_active_macs, quota, quota_rem;
4024 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4025 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4026 	uint16_t id;
4027 
4028 	memset(&cmd, 0, sizeof(cmd));
4029 
4030 	/* currently, PHY ID == binding ID */
4031 	if (in) {
4032 		id = in->in_phyctxt->id;
4033 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4034 		colors[id] = in->in_phyctxt->color;
4035 
4036 		if (1)
4037 			n_ifs[id] = 1;
4038 	}
4039 
4040 	/*
4041 	 * The FW's scheduling session consists of
4042 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4043 	 * equally between all the bindings that require quota
4044 	 */
4045 	num_active_macs = 0;
4046 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4047 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4048 		num_active_macs += n_ifs[i];
4049 	}
4050 
4051 	quota = 0;
4052 	quota_rem = 0;
4053 	if (num_active_macs) {
4054 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4055 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4056 	}
4057 
4058 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4059 		if (colors[i] < 0)
4060 			continue;
4061 
4062 		cmd.quotas[idx].id_and_color =
4063 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4064 
4065 		if (n_ifs[i] <= 0) {
4066 			cmd.quotas[idx].quota = htole32(0);
4067 			cmd.quotas[idx].max_duration = htole32(0);
4068 		} else {
4069 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4070 			cmd.quotas[idx].max_duration = htole32(0);
4071 		}
4072 		idx++;
4073 	}
4074 
4075 	/* Give the remainder of the session to the first binding */
4076 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4077 
4078 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4079 	    sizeof(cmd), &cmd);
4080 	if (ret)
4081 		device_printf(sc->sc_dev,
4082 		    "%s: Failed to send quota: %d\n", __func__, ret);
4083 	return ret;
4084 }
4085 
4086 /*
4087  * END mvm/quota.c
4088  */
4089 
4090 /*
4091  * ieee80211 routines
4092  */
4093 
4094 /*
4095  * Change to AUTH state in 80211 state machine.  Roughly matches what
4096  * Linux does in bss_info_changed().
4097  */
4098 static int
4099 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4100 {
4101 	struct ieee80211_node *ni;
4102 	struct iwm_node *in;
4103 	struct iwm_vap *iv = IWM_VAP(vap);
4104 	uint32_t duration;
4105 	int error;
4106 
4107 	/*
4108 	 * XXX i have a feeling that the vap node is being
4109 	 * freed from underneath us. Grr.
4110 	 */
4111 	ni = ieee80211_ref_node(vap->iv_bss);
4112 	in = IWM_NODE(ni);
4113 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4114 	    "%s: called; vap=%p, bss ni=%p\n",
4115 	    __func__,
4116 	    vap,
4117 	    ni);
4118 
4119 	in->in_assoc = 0;
4120 
4121 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4122 	if (error != 0)
4123 		return error;
4124 
4125 	error = iwm_allow_mcast(vap, sc);
4126 	if (error) {
4127 		device_printf(sc->sc_dev,
4128 		    "%s: failed to set multicast\n", __func__);
4129 		goto out;
4130 	}
4131 
4132 	/*
4133 	 * This is where it deviates from what Linux does.
4134 	 *
4135 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4136 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4137 	 * and always does a mac_ctx_changed().
4138 	 *
4139 	 * The openbsd port doesn't attempt to do that - it reset things
4140 	 * at odd states and does the add here.
4141 	 *
4142 	 * So, until the state handling is fixed (ie, we never reset
4143 	 * the NIC except for a firmware failure, which should drag
4144 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4145 	 * contexts that are required), let's do a dirty hack here.
4146 	 */
4147 	if (iv->is_uploaded) {
4148 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4149 			device_printf(sc->sc_dev,
4150 			    "%s: failed to update MAC\n", __func__);
4151 			goto out;
4152 		}
4153 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4154 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4155 			device_printf(sc->sc_dev,
4156 			    "%s: failed update phy ctxt\n", __func__);
4157 			goto out;
4158 		}
4159 		in->in_phyctxt = &sc->sc_phyctxt[0];
4160 
4161 		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4162 			device_printf(sc->sc_dev,
4163 			    "%s: binding update cmd\n", __func__);
4164 			goto out;
4165 		}
4166 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4167 			device_printf(sc->sc_dev,
4168 			    "%s: failed to update sta\n", __func__);
4169 			goto out;
4170 		}
4171 	} else {
4172 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4173 			device_printf(sc->sc_dev,
4174 			    "%s: failed to add MAC\n", __func__);
4175 			goto out;
4176 		}
4177 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4178 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4179 			device_printf(sc->sc_dev,
4180 			    "%s: failed add phy ctxt!\n", __func__);
4181 			error = ETIMEDOUT;
4182 			goto out;
4183 		}
4184 		in->in_phyctxt = &sc->sc_phyctxt[0];
4185 
4186 		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4187 			device_printf(sc->sc_dev,
4188 			    "%s: binding add cmd\n", __func__);
4189 			goto out;
4190 		}
4191 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4192 			device_printf(sc->sc_dev,
4193 			    "%s: failed to add sta\n", __func__);
4194 			goto out;
4195 		}
4196 	}
4197 
4198 	/*
4199 	 * Prevent the FW from wandering off channel during association
4200 	 * by "protecting" the session with a time event.
4201 	 */
4202 	/* XXX duration is in units of TU, not MS */
4203 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4204 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4205 	DELAY(100);
4206 
4207 	error = 0;
4208 out:
4209 	ieee80211_free_node(ni);
4210 	return (error);
4211 }
4212 
4213 static int
4214 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4215 {
4216 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4217 	int error;
4218 
4219 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4220 		device_printf(sc->sc_dev,
4221 		    "%s: failed to update STA\n", __func__);
4222 		return error;
4223 	}
4224 
4225 	in->in_assoc = 1;
4226 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4227 		device_printf(sc->sc_dev,
4228 		    "%s: failed to update MAC\n", __func__);
4229 		return error;
4230 	}
4231 
4232 	return 0;
4233 }
4234 
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	/* Drop anything still queued in software before flushing HW. */
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full reset: stop the device and bring it all the way back up. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/*
	 * NOTE(review): dead code kept for reference; if ever revived,
	 * note that iwm_mvm_rm_sta() is called twice below.
	 */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4305 
4306 static struct ieee80211_node *
4307 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4308 {
4309 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4310 	    M_NOWAIT | M_ZERO);
4311 }
4312 
/*
 * Build the firmware link-quality (rate selection) command for 'in'
 * from the node's negotiated legacy rate set.  Rates are written into
 * lq->rs_table from highest to lowest; the remainder of the table is
 * padded with the lowest rate.  Not 11n-aware.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	/* -1 marks unused in_ridx slots. */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* Unknown rate: leave in_ridx[i] at -1. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		/* Always use all valid TX antennas (no round-robin). */
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4431 
4432 static int
4433 iwm_media_change(struct ifnet *ifp)
4434 {
4435 	struct ieee80211vap *vap = ifp->if_softc;
4436 	struct ieee80211com *ic = vap->iv_ic;
4437 	struct iwm_softc *sc = ic->ic_softc;
4438 	int error;
4439 
4440 	error = ieee80211_media_change(ifp);
4441 	if (error != ENETRESET)
4442 		return error;
4443 
4444 	IWM_LOCK(sc);
4445 	if (ic->ic_nrunning > 0) {
4446 		iwm_stop(sc);
4447 		iwm_init(sc);
4448 	}
4449 	IWM_UNLOCK(sc);
4450 	return error;
4451 }
4452 
4453 
/*
 * net80211 state-machine hook.  Called with the com lock held; we must
 * drop it before taking the driver lock and re-take it before chaining
 * to the stack's iv_newstate at the end.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	/* Lock order: com lock released before the driver lock is taken. */
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Chain to the stack first, then reset the device
			 * via iwm_release(); note the lock juggling needed
			 * to call back into net80211 safely.
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quotas and rates for the new BSS. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the freshly built link-quality table to firmware. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4591 
4592 void
4593 iwm_endscan_cb(void *arg, int pending)
4594 {
4595 	struct iwm_softc *sc = arg;
4596 	struct ieee80211com *ic = &sc->sc_ic;
4597 
4598 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4599 	    "%s: scan ended\n",
4600 	    __func__);
4601 
4602 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4603 }
4604 
4605 /*
4606  * Aging and idle timeouts for the different possible scenarios
4607  * in default configuration
4608  */
/* Each row is { aging timeout, idle timeout } for one SF scenario. */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4632 
4633 /*
4634  * Aging and idle timeouts for the different possible scenarios
4635  * in single BSS MAC configuration.
4636  */
/* Each row is { aging timeout, idle timeout } for one SF scenario. */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4660 
4661 static void
4662 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4663     struct ieee80211_node *ni)
4664 {
4665 	int i, j, watermark;
4666 
4667 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4668 
4669 	/*
4670 	 * If we are in association flow - check antenna configuration
4671 	 * capabilities of the AP station, and choose the watermark accordingly.
4672 	 */
4673 	if (ni) {
4674 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4675 #ifdef notyet
4676 			if (ni->ni_rxmcs[2] != 0)
4677 				watermark = IWM_SF_W_MARK_MIMO3;
4678 			else if (ni->ni_rxmcs[1] != 0)
4679 				watermark = IWM_SF_W_MARK_MIMO2;
4680 			else
4681 #endif
4682 				watermark = IWM_SF_W_MARK_SISO;
4683 		} else {
4684 			watermark = IWM_SF_W_MARK_LEGACY;
4685 		}
4686 	/* default watermark value for unassociated mode. */
4687 	} else {
4688 		watermark = IWM_SF_W_MARK_MIMO2;
4689 	}
4690 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4691 
4692 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4693 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4694 			sf_cmd->long_delay_timeouts[i][j] =
4695 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4696 		}
4697 	}
4698 
4699 	if (ni) {
4700 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4701 		       sizeof(iwm_sf_full_timeout));
4702 	} else {
4703 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4704 		       sizeof(iwm_sf_full_timeout_def));
4705 	}
4706 }
4707 
4708 static int
4709 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4710 {
4711 	struct ieee80211com *ic = &sc->sc_ic;
4712 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4713 	struct iwm_sf_cfg_cmd sf_cmd = {
4714 		.state = htole32(IWM_SF_FULL_ON),
4715 	};
4716 	int ret = 0;
4717 
4718 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4719 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4720 
4721 	switch (new_state) {
4722 	case IWM_SF_UNINIT:
4723 	case IWM_SF_INIT_OFF:
4724 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4725 		break;
4726 	case IWM_SF_FULL_ON:
4727 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4728 		break;
4729 	default:
4730 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4731 		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4732 			  new_state);
4733 		return EINVAL;
4734 	}
4735 
4736 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4737 				   sizeof(sf_cmd), &sf_cmd);
4738 	return ret;
4739 }
4740 
4741 static int
4742 iwm_send_bt_init_conf(struct iwm_softc *sc)
4743 {
4744 	struct iwm_bt_coex_cmd bt_cmd;
4745 
4746 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4747 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4748 
4749 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4750 	    &bt_cmd);
4751 }
4752 
/*
 * Send an MCC (mobile country code) update to the firmware for LAR
 * (location-aware regulatory).  'alpha2' is the two-letter country
 * code, e.g. "ZZ" for the world domain.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware capability decides which response layout we'll parse. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* MCC is the two ASCII letters packed into a 16-bit value. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 command is a prefix of v2; send only the applicable length. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4820 
4821 static void
4822 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4823 {
4824 	struct iwm_host_cmd cmd = {
4825 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4826 		.len = { sizeof(uint32_t), },
4827 		.data = { &backoff, },
4828 	};
4829 
4830 	if (iwm_send_cmd(sc, &cmd) != 0) {
4831 		device_printf(sc->sc_dev,
4832 		    "failed to change thermal tx backoff\n");
4833 	}
4834 }
4835 
/*
 * Full hardware bring-up: run the INIT firmware image, restart the HW,
 * load the regular firmware, then configure stations, PHY contexts,
 * power, regulatory and tx queues.  The ordering below is mandated by
 * the firmware and must not be rearranged.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* LAR: "ZZ" selects the world regulatory domain until set later. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	/* Any failure after firmware load: shut the device back down. */
	iwm_stop_device(sc);
	return error;
}
4943 
4944 /* Allow multicast from our BSSID. */
4945 static int
4946 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4947 {
4948 	struct ieee80211_node *ni = vap->iv_bss;
4949 	struct iwm_mcast_filter_cmd *cmd;
4950 	size_t size;
4951 	int error;
4952 
4953 	size = roundup(sizeof(*cmd), 4);
4954 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4955 	if (cmd == NULL)
4956 		return ENOMEM;
4957 	cmd->filter_own = 1;
4958 	cmd->port_id = 0;
4959 	cmd->count = 0;
4960 	cmd->pass_all = 1;
4961 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4962 
4963 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4964 	    IWM_CMD_SYNC, size, cmd);
4965 	free(cmd, M_DEVBUF);
4966 
4967 	return (error);
4968 }
4969 
4970 /*
4971  * ifnet interfaces
4972  */
4973 
4974 static void
4975 iwm_init(struct iwm_softc *sc)
4976 {
4977 	int error;
4978 
4979 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4980 		return;
4981 	}
4982 	sc->sc_generation++;
4983 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4984 
4985 	if ((error = iwm_init_hw(sc)) != 0) {
4986 		printf("iwm_init_hw failed %d\n", error);
4987 		iwm_stop(sc);
4988 		return;
4989 	}
4990 
4991 	/*
4992 	 * Ok, firmware loaded and we are jogging
4993 	 */
4994 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4995 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4996 }
4997 
4998 static int
4999 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5000 {
5001 	struct iwm_softc *sc;
5002 	int error;
5003 
5004 	sc = ic->ic_softc;
5005 
5006 	IWM_LOCK(sc);
5007 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5008 		IWM_UNLOCK(sc);
5009 		return (ENXIO);
5010 	}
5011 	error = mbufq_enqueue(&sc->sc_snd, m);
5012 	if (error) {
5013 		IWM_UNLOCK(sc);
5014 		return (error);
5015 	}
5016 	iwm_start(sc);
5017 	IWM_UNLOCK(sc);
5018 	return (0);
5019 }
5020 
5021 /*
5022  * Dequeue packets from sendq and call send.
5023  */
5024 static void
5025 iwm_start(struct iwm_softc *sc)
5026 {
5027 	struct ieee80211_node *ni;
5028 	struct mbuf *m;
5029 	int ac = 0;
5030 
5031 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5032 	while (sc->qfullmsk == 0 &&
5033 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5034 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5035 		if (iwm_tx(sc, m, ni, ac) != 0) {
5036 			if_inc_counter(ni->ni_vap->iv_ifp,
5037 			    IFCOUNTER_OERRORS, 1);
5038 			ieee80211_free_node(ni);
5039 			continue;
5040 		}
5041 		sc->sc_tx_timer = 15;
5042 	}
5043 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5044 }
5045 
/*
 * Stop the device and update driver state flags accordingly.
 * Callers in this file invoke this with the driver lock held.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* NOTE(review): generation bump presumably invalidates in-flight
	 * work from the previous up/down cycle -- confirm against users
	 * of sc_generation. */
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	/* Disarm the transmit watchdog (see iwm_watchdog()). */
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5058 
5059 static void
5060 iwm_watchdog(void *arg)
5061 {
5062 	struct iwm_softc *sc = arg;
5063 	struct ieee80211com *ic = &sc->sc_ic;
5064 
5065 	if (sc->sc_tx_timer > 0) {
5066 		if (--sc->sc_tx_timer == 0) {
5067 			device_printf(sc->sc_dev, "device timeout\n");
5068 #ifdef IWM_DEBUG
5069 			iwm_nic_error(sc);
5070 #endif
5071 			ieee80211_restart_all(ic);
5072 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5073 			return;
5074 		}
5075 	}
5076 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5077 }
5078 
5079 static void
5080 iwm_parent(struct ieee80211com *ic)
5081 {
5082 	struct iwm_softc *sc = ic->ic_softc;
5083 	int startall = 0;
5084 
5085 	IWM_LOCK(sc);
5086 	if (ic->ic_nrunning > 0) {
5087 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5088 			iwm_init(sc);
5089 			startall = 1;
5090 		}
5091 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5092 		iwm_stop(sc);
5093 	IWM_UNLOCK(sc);
5094 	if (startall)
5095 		ieee80211_start_all(ic);
5096 }
5097 
5098 /*
5099  * The interrupt side of things
5100  */
5101 
5102 /*
5103  * error dumping routines are from iwlwifi/mvm/utils.c
5104  */
5105 
5106 /*
5107  * Note: This structure is read from the device with IO accesses,
5108  * and the reading already does the endian conversion. As it is
5109  * read with uint32_t-sized accesses, any members with a different size
5110  * need to be ordered correctly though!
5111  */
struct iwm_error_event_table {
	/* Every field is fetched as a uint32_t word straight from device
	 * memory (see the note above); smaller logical values still occupy
	 * a full word here. */
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5159 
5160 /*
5161  * UMAC error struct - relevant starting from family 8000 chip.
5162  * Note: This structure is read from the device with IO accesses,
5163  * and the reading already does the endian conversion. As it is
5164  * read with u32-sized accesses, any members with a different size
5165  * need to be ordered correctly though!
5166  */
struct iwm_umac_error_event_table {
	/* Mirrors the UMAC error log layout; only dumped for devices that
	 * report a umac_error_event_table address (see iwm_nic_error()). */
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5184 
/*
 * Geometry of the firmware error log used when sanity-checking
 * table.valid in iwm_nic_error()/iwm_nic_umac_error() below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5187 
5188 #ifdef IWM_DEBUG
/*
 * Map firmware error IDs to human-readable assert names.  The final
 * entry ("ADVANCED_SYSASSERT", 0) is the catch-all returned by
 * iwm_desc_lookup() when no other entry matches.
 *
 * Declared static const: the table is read-only and private to this
 * file, so it should not have external linkage (style(9)).
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5210 
5211 static const char *
5212 iwm_desc_lookup(uint32_t num)
5213 {
5214 	int i;
5215 
5216 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5217 		if (advanced_lookup[i].num == num)
5218 			return advanced_lookup[i].name;
5219 
5220 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5221 	return advanced_lookup[i].name;
5222 }
5223 
/*
 * Read the UMAC error event table out of device memory and dump it
 * to the console.  The table address comes from
 * sc->umac_error_event_table (presumably reported by the firmware in
 * its alive response -- confirm against the alive handler).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Addresses below 0x800000 cannot point at a valid log
	 * (presumably below the device SRAM base -- TODO confirm). */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a word count, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* Print a header when the log claims at least one entry. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5270 
5271 /*
5272  * Support for dumping the error log seemed like a good idea ...
5273  * but it's mostly hex junk and the only sensible thing is the
5274  * hw/ucode revision (which we know anyway).  Since it's here,
5275  * I'll just leave it in, just in case e.g. the Intel guys want to
5276  * help us decipher some "ADVANCED_SYSASSERT" later.
5277  */
/*
 * Read the LMAC error event table from device memory at
 * sc->error_event_table and dump every field; chains to
 * iwm_nic_umac_error() when a UMAC table address is also known.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Addresses below 0x800000 cannot point at a valid log
	 * (presumably below the device SRAM base -- TODO confirm). */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a word count, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* Print a header when the log claims at least one entry. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5350 #endif
5351 
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * The argument is parenthesized and the expansion carries no trailing
 * semicolon, so the macro behaves as a single ordinary statement
 * (the old form expanded to two statements inside if/else bodies).
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
5353 
5354 /*
5355  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5356  * Basic structure from if_iwn
5357  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Ring index of the last receive buffer the firmware closed. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 marks firmware-originated notifications; see the
		 * long comment before iwm_cmd_done() below. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Hand the packet to the notification-wait framework so
		 * synchronous waiters on this code are woken. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			stats = (void *)pkt->data;
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* NOTE(review): copies sizeof(sc_cmd_resp) bytes from
			 * pkt regardless of the actual packet length -- verify
			 * the RX buffer is always at least that large. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Stash the two-letter country code as a C string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break; }

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		/* Plain command responses: latch into sc_cmd_resp for the
		 * synchronous sender if it is waiting on this qid/idx. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_ABORT_UMAC:
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_FW_PAGING_BLOCK_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
 			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
5659 
5660 static void
5661 iwm_intr(void *arg)
5662 {
5663 	struct iwm_softc *sc = arg;
5664 	int handled = 0;
5665 	int r1, r2, rv = 0;
5666 	int isperiodic = 0;
5667 
5668 	IWM_LOCK(sc);
5669 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5670 
5671 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5672 		uint32_t *ict = sc->ict_dma.vaddr;
5673 		int tmp;
5674 
5675 		tmp = htole32(ict[sc->ict_cur]);
5676 		if (!tmp)
5677 			goto out_ena;
5678 
5679 		/*
5680 		 * ok, there was something.  keep plowing until we have all.
5681 		 */
5682 		r1 = r2 = 0;
5683 		while (tmp) {
5684 			r1 |= tmp;
5685 			ict[sc->ict_cur] = 0;
5686 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5687 			tmp = htole32(ict[sc->ict_cur]);
5688 		}
5689 
5690 		/* this is where the fun begins.  don't ask */
5691 		if (r1 == 0xffffffff)
5692 			r1 = 0;
5693 
5694 		/* i am not expected to understand this */
5695 		if (r1 & 0xc0000)
5696 			r1 |= 0x8000;
5697 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5698 	} else {
5699 		r1 = IWM_READ(sc, IWM_CSR_INT);
5700 		/* "hardware gone" (where, fishing?) */
5701 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5702 			goto out;
5703 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5704 	}
5705 	if (r1 == 0 && r2 == 0) {
5706 		goto out_ena;
5707 	}
5708 
5709 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5710 
5711 	/* Safely ignore these bits for debug checks below */
5712 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5713 
5714 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5715 		int i;
5716 		struct ieee80211com *ic = &sc->sc_ic;
5717 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5718 
5719 #ifdef IWM_DEBUG
5720 		iwm_nic_error(sc);
5721 #endif
5722 		/* Dump driver status (TX and RX rings) while we're here. */
5723 		device_printf(sc->sc_dev, "driver status:\n");
5724 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5725 			struct iwm_tx_ring *ring = &sc->txq[i];
5726 			device_printf(sc->sc_dev,
5727 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5728 			    "queued=%-3d\n",
5729 			    i, ring->qid, ring->cur, ring->queued);
5730 		}
5731 		device_printf(sc->sc_dev,
5732 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5733 		device_printf(sc->sc_dev,
5734 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5735 
5736 		/* Don't stop the device; just do a VAP restart */
5737 		IWM_UNLOCK(sc);
5738 
5739 		if (vap == NULL) {
5740 			printf("%s: null vap\n", __func__);
5741 			return;
5742 		}
5743 
5744 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5745 		    "restarting\n", __func__, vap->iv_state);
5746 
5747 		/* XXX TODO: turn this into a callout/taskqueue */
5748 		ieee80211_restart_all(ic);
5749 		return;
5750 	}
5751 
5752 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5753 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5754 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5755 		iwm_stop(sc);
5756 		rv = 1;
5757 		goto out;
5758 	}
5759 
5760 	/* firmware chunk loaded */
5761 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5762 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5763 		handled |= IWM_CSR_INT_BIT_FH_TX;
5764 		sc->sc_fw_chunk_done = 1;
5765 		wakeup(&sc->sc_fw);
5766 	}
5767 
5768 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5769 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5770 		if (iwm_check_rfkill(sc)) {
5771 			device_printf(sc->sc_dev,
5772 			    "%s: rfkill switch, disabling interface\n",
5773 			    __func__);
5774 			iwm_stop(sc);
5775 		}
5776 	}
5777 
5778 	/*
5779 	 * The Linux driver uses periodic interrupts to avoid races.
5780 	 * We cargo-cult like it's going out of fashion.
5781 	 */
5782 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5783 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5784 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5785 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5786 			IWM_WRITE_1(sc,
5787 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5788 		isperiodic = 1;
5789 	}
5790 
5791 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5792 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5793 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5794 
5795 		iwm_notif_intr(sc);
5796 
5797 		/* enable periodic interrupt, see above */
5798 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5799 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5800 			    IWM_CSR_INT_PERIODIC_ENA);
5801 	}
5802 
5803 	if (__predict_false(r1 & ~handled))
5804 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5805 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5806 	rv = 1;
5807 
5808  out_ena:
5809 	iwm_restore_interrupts(sc);
5810  out:
5811 	IWM_UNLOCK(sc);
5812 	return;
5813 }
5814 
5815 /*
5816  * Autoconf glue-sniffing
5817  */
/*
 * PCI vendor/device IDs matched by iwm_probe()/iwm_dev_check() below.
 */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5828 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5829 
/*
 * Table mapping supported PCI device IDs to their per-chip
 * configuration; consulted by iwm_probe() and iwm_dev_check().
 */
static const struct iwm_devices {
	uint16_t		device;	/* PCI device ID */
	const struct iwm_cfg	*cfg;	/* chip configuration/name */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5845 
5846 static int
5847 iwm_probe(device_t dev)
5848 {
5849 	int i;
5850 
5851 	for (i = 0; i < nitems(iwm_devices); i++) {
5852 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5853 		    pci_get_device(dev) == iwm_devices[i].device) {
5854 			device_set_desc(dev, iwm_devices[i].cfg->name);
5855 			return (BUS_PROBE_DEFAULT);
5856 		}
5857 	}
5858 
5859 	return (ENXIO);
5860 }
5861 
5862 static int
5863 iwm_dev_check(device_t dev)
5864 {
5865 	struct iwm_softc *sc;
5866 	uint16_t devid;
5867 	int i;
5868 
5869 	sc = device_get_softc(dev);
5870 
5871 	devid = pci_get_device(dev);
5872 	for (i = 0; i < nitems(iwm_devices); i++) {
5873 		if (iwm_devices[i].device == devid) {
5874 			sc->cfg = iwm_devices[i].cfg;
5875 			return (0);
5876 		}
5877 	}
5878 	device_printf(dev, "unknown adapter type\n");
5879 	return ENXIO;
5880 }
5881 
5882 /* PCI registers */
5883 #define PCI_CFG_RETRY_TIMEOUT	0x041
5884 
5885 static int
5886 iwm_pci_attach(device_t dev)
5887 {
5888 	struct iwm_softc *sc;
5889 	int count, error, rid;
5890 	uint16_t reg;
5891 
5892 	sc = device_get_softc(dev);
5893 
5894 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5895 	 * PCI Tx retries from interfering with C3 CPU state */
5896 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5897 
5898 	/* Enable bus-mastering and hardware bug workaround. */
5899 	pci_enable_busmaster(dev);
5900 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5901 	/* if !MSI */
5902 	if (reg & PCIM_STATUS_INTxSTATE) {
5903 		reg &= ~PCIM_STATUS_INTxSTATE;
5904 	}
5905 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5906 
5907 	rid = PCIR_BAR(0);
5908 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5909 	    RF_ACTIVE);
5910 	if (sc->sc_mem == NULL) {
5911 		device_printf(sc->sc_dev, "can't map mem space\n");
5912 		return (ENXIO);
5913 	}
5914 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5915 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5916 
5917 	/* Install interrupt handler. */
5918 	count = 1;
5919 	rid = 0;
5920 	if (pci_alloc_msi(dev, &count) == 0)
5921 		rid = 1;
5922 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5923 	    (rid != 0 ? 0 : RF_SHAREABLE));
5924 	if (sc->sc_irq == NULL) {
5925 		device_printf(dev, "can't map interrupt\n");
5926 			return (ENXIO);
5927 	}
5928 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5929 	    NULL, iwm_intr, sc, &sc->sc_ih);
5930 	if (sc->sc_ih == NULL) {
5931 		device_printf(dev, "can't establish interrupt");
5932 			return (ENXIO);
5933 	}
5934 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5935 
5936 	return (0);
5937 }
5938 
5939 static void
5940 iwm_pci_detach(device_t dev)
5941 {
5942 	struct iwm_softc *sc = device_get_softc(dev);
5943 
5944 	if (sc->sc_irq != NULL) {
5945 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5946 		bus_release_resource(dev, SYS_RES_IRQ,
5947 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5948 		pci_release_msi(dev);
5949         }
5950 	if (sc->sc_mem != NULL)
5951 		bus_release_resource(dev, SYS_RES_MEMORY,
5952 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5953 }
5954 
5955 
5956 
/*
 * Newbus attach: set up software state (locks, callouts, queues), map
 * PCI resources, identify the exact silicon revision, and allocate the
 * DMA memory/rings the firmware requires.  Firmware load and net80211
 * attach are deferred to iwm_preinit(), which runs from the
 * config_intrhook established near the end of this function.
 * On any failure, iwm_detach_local(sc, 0) tears everything down.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	/* Mark attached first so the failure path's detach runs fully. */
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait (up to 25ms) for the MAC clock to come up. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			/* Extract the step field from the AUX MISC register. */
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	/* Reset all PHY contexts to an unused state. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware load/net80211 attach until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6155 
6156 static int
6157 iwm_is_valid_ether_addr(uint8_t *addr)
6158 {
6159 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6160 
6161 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6162 		return (FALSE);
6163 
6164 	return (TRUE);
6165 }
6166 
6167 static int
6168 iwm_update_edca(struct ieee80211com *ic)
6169 {
6170 	struct iwm_softc *sc = ic->ic_softc;
6171 
6172 	device_printf(sc->sc_dev, "%s: called\n", __func__);
6173 	return (0);
6174 }
6175 
/*
 * Second-stage attach, run via config_intrhook once interrupts are
 * available: start the hardware, run the "init" firmware once (to read
 * NVM data such as the MAC address and band capabilities), then attach
 * to net80211 and radiotap.  On failure the entire device is torn down
 * with iwm_detach_local(); in all cases the intrhook is disestablished.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init firmware once, then stop the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Override net80211 methods with our driver implementations. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6246 
6247 /*
6248  * Attach the interface to 802.11 radiotap.
6249  */
6250 static void
6251 iwm_radiotap_attach(struct iwm_softc *sc)
6252 {
6253         struct ieee80211com *ic = &sc->sc_ic;
6254 
6255 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6256 	    "->%s begin\n", __func__);
6257         ieee80211_radiotap_attach(ic,
6258             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6259                 IWM_TX_RADIOTAP_PRESENT,
6260             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6261                 IWM_RX_RADIOTAP_PRESENT);
6262 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6263 	    "->%s end\n", __func__);
6264 }
6265 
/*
 * net80211 VAP creation callback.  This driver supports only a single
 * VAP at a time; additional create requests fail with NULL.  The
 * net80211 newstate handler is interposed (original saved in
 * ivp->iv_newstate) so the driver can drive firmware state changes.
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
		return NULL;
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;            /* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}
6293 
6294 static void
6295 iwm_vap_delete(struct ieee80211vap *vap)
6296 {
6297 	struct iwm_vap *ivp = IWM_VAP(vap);
6298 
6299 	ieee80211_ratectl_deinit(vap);
6300 	ieee80211_vap_detach(vap);
6301 	free(ivp, M_80211_VAP);
6302 }
6303 
/*
 * net80211 scan-start callback (runs on ic->ic_tq).  Kicks off either a
 * UMAC or LMAC firmware scan depending on advertised firmware
 * capabilities.  On success the SCAN_RUNNING flag is set and the scan
 * LED starts blinking; on failure the scan is cancelled back to
 * net80211.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	/* Prefer the UMAC scan API when the firmware supports it. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		/* Must be called unlocked: cancel the scan in net80211. */
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6331 
/*
 * net80211 scan-end callback (runs on ic->ic_tq).  Stops the scan LED
 * (re-enabling the steady LED if associated), clears the scan-running
 * state, waits for the firmware scan to stop, and cancels any pending
 * end-scan task so it cannot race with a subsequent scan.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6360 
/*
 * net80211 multicast-filter update callback.
 * Intentionally a no-op: multicast filter programming is not
 * implemented in this driver.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6365 
/*
 * net80211 set-channel callback.
 * Intentionally a no-op in this driver.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6370 
/*
 * net80211 per-channel scan callback.
 * Intentionally a no-op: scanning is fully offloaded to the firmware
 * (see iwm_scan_start()).
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6375 
/*
 * net80211 minimum-dwell scan callback.
 * Intentionally empty: scanning is handled by the firmware.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6381 
/*
 * Restart the device: stop it and, if the interface is still marked
 * running, bring it back up.  IWM_FLAG_BUSY is used as a sleepable
 * interlock ("iwmpwr") so that only one init/stop sequence runs at a
 * time; waiters are woken via wakeup() on sc_flags.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait until any concurrent init/stop sequence has finished. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6398 
/*
 * Newbus resume method: re-apply the PCI retry-timeout workaround,
 * restart the device, and — if a suspend interrupted activity (flagged
 * via IWM_FLAG_SCANNING in iwm_suspend()) — resume net80211 operation.
 */
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	/* Must be called unlocked. */
	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return 0;
}
6424 
6425 static int
6426 iwm_suspend(device_t dev)
6427 {
6428 	int do_stop = 0;
6429 	struct iwm_softc *sc = device_get_softc(dev);
6430 
6431 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6432 
6433 	ieee80211_suspend_all(&sc->sc_ic);
6434 
6435 	if (do_stop) {
6436 		IWM_LOCK(sc);
6437 		iwm_stop(sc);
6438 		sc->sc_flags |= IWM_FLAG_SCANNING;
6439 		IWM_UNLOCK(sc);
6440 	}
6441 
6442 	return (0);
6443 }
6444 
/*
 * Common teardown used by both the attach-failure path and device
 * detach.
 *
 * @do_net80211: nonzero when net80211 state was attached and must be
 *               detached as well (attach-failure paths pass 0).
 *
 * Safe against repeated invocation: sc_attached gates re-entry.
 * Teardown order matters: callouts are drained and the device stopped
 * before freeing rings, firmware, and finally the PCI resources.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drain any queued outbound mbufs, then destroy the driver lock. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6501 
6502 static int
6503 iwm_detach(device_t dev)
6504 {
6505 	struct iwm_softc *sc = device_get_softc(dev);
6506 
6507 	return (iwm_detach_local(sc, 1));
6508 }
6509 
/* Newbus device method table mapping bus operations to this driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

/* Driver declaration: name, methods, and per-instance softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6533