xref: /freebsd/sys/dev/iwm/if_iwm.c (revision f4bf2442a03f9b72cfe6d051766b650a4721f3d8)
1 /*	$OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126 
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130 
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133 
134 #include <net/bpf.h>
135 
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142 
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147 
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152 
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164 
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 
/*
 * Channel numbers advertised by the device NVM, in ascending order.
 * The first IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels; the
 * remainder are 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Count of 2.4 GHz entries at the start of iwm_nvm_channels[]. */
#define IWM_NUM_2GHZ_CHANNELS	14

_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
179 
180 /*
181  * XXX For now, there's simply a fixed set of rate table entries
182  * that are populated.
183  */
184 const struct iwm_rate {
185 	uint8_t rate;
186 	uint8_t plcp;
187 } iwm_rates[] = {
188 	{   2,	IWM_RATE_1M_PLCP  },
189 	{   4,	IWM_RATE_2M_PLCP  },
190 	{  11,	IWM_RATE_5M_PLCP  },
191 	{  22,	IWM_RATE_11M_PLCP },
192 	{  12,	IWM_RATE_6M_PLCP  },
193 	{  18,	IWM_RATE_9M_PLCP  },
194 	{  24,	IWM_RATE_12M_PLCP },
195 	{  36,	IWM_RATE_18M_PLCP },
196 	{  48,	IWM_RATE_24M_PLCP },
197 	{  72,	IWM_RATE_36M_PLCP },
198 	{  96,	IWM_RATE_48M_PLCP },
199 	{ 108,	IWM_RATE_54M_PLCP },
200 };
201 #define IWM_RIDX_CCK	0
202 #define IWM_RIDX_OFDM	4
203 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
204 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
205 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
206 
207 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
208 static int	iwm_firmware_store_section(struct iwm_softc *,
209                                            enum iwm_ucode_type,
210                                            const uint8_t *, size_t);
211 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
212 static void	iwm_fw_info_free(struct iwm_fw_info *);
213 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
214 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
215 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
216                                      bus_size_t, bus_size_t);
217 static void	iwm_dma_contig_free(struct iwm_dma_info *);
218 static int	iwm_alloc_fwmem(struct iwm_softc *);
219 static void	iwm_free_fwmem(struct iwm_softc *);
220 static int	iwm_alloc_sched(struct iwm_softc *);
221 static void	iwm_free_sched(struct iwm_softc *);
222 static int	iwm_alloc_kw(struct iwm_softc *);
223 static void	iwm_free_kw(struct iwm_softc *);
224 static int	iwm_alloc_ict(struct iwm_softc *);
225 static void	iwm_free_ict(struct iwm_softc *);
226 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
227 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
228 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
229 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
230                                   int);
231 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
232 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
233 static void	iwm_enable_interrupts(struct iwm_softc *);
234 static void	iwm_restore_interrupts(struct iwm_softc *);
235 static void	iwm_disable_interrupts(struct iwm_softc *);
236 static void	iwm_ict_reset(struct iwm_softc *);
237 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
238 static void	iwm_stop_device(struct iwm_softc *);
239 static void	iwm_mvm_nic_config(struct iwm_softc *);
240 static int	iwm_nic_rx_init(struct iwm_softc *);
241 static int	iwm_nic_tx_init(struct iwm_softc *);
242 static int	iwm_nic_init(struct iwm_softc *);
243 static void	iwm_enable_txq(struct iwm_softc *, int, int);
244 static int	iwm_post_alive(struct iwm_softc *);
245 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
246                                    uint16_t, uint8_t *, uint16_t *);
247 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
248 				     uint16_t *);
249 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
250 static void	iwm_add_channel_band(struct iwm_softc *,
251 		    struct ieee80211_channel[], int, int *, int, int,
252 		    const uint8_t[]);
253 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
254 		    struct ieee80211_channel[]);
255 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
256 			           const uint16_t *, const uint16_t *, uint8_t,
257 				   uint8_t);
258 struct iwm_nvm_section;
259 static int	iwm_parse_nvm_sections(struct iwm_softc *,
260                                        struct iwm_nvm_section *);
261 static int	iwm_nvm_init(struct iwm_softc *);
262 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
263                                         const uint8_t *, uint32_t);
264 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
265 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
266 static int	iwm_fw_alive(struct iwm_softc *, uint32_t);
267 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
268 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
269 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
270                                               enum iwm_ucode_type);
271 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
272 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
273 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
274 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
275 					    struct iwm_rx_phy_info *);
276 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
277                                       struct iwm_rx_packet *,
278                                       struct iwm_rx_data *);
279 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
280 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
281                                    struct iwm_rx_data *);
282 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
283                                          struct iwm_rx_packet *,
284 				         struct iwm_node *);
285 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
286                                   struct iwm_rx_data *);
287 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
288 #if 0
289 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
290                                  uint16_t);
291 #endif
292 static const struct iwm_rate *
293 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
294 			struct ieee80211_frame *, struct iwm_tx_cmd *);
295 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
296                        struct ieee80211_node *, int);
297 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
298 			     const struct ieee80211_bpf_params *);
299 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
300 					     struct iwm_mvm_add_sta_cmd_v5 *);
301 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
302 					        struct iwm_mvm_add_sta_cmd_v6 *,
303                                                 int *);
304 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
305                                        int);
306 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
307 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
308 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
309                                            struct iwm_int_sta *,
310 				           const uint8_t *, uint16_t, uint16_t);
311 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
312 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
313 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
314 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
315 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
316 static struct ieee80211_node *
317 		iwm_node_alloc(struct ieee80211vap *,
318 		               const uint8_t[IEEE80211_ADDR_LEN]);
319 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
320 static int	iwm_media_change(struct ifnet *);
321 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
322 static void	iwm_endscan_cb(void *, int);
323 static int	iwm_init_hw(struct iwm_softc *);
324 static void	iwm_init(struct iwm_softc *);
325 static void	iwm_start(struct iwm_softc *);
326 static void	iwm_stop(struct iwm_softc *);
327 static void	iwm_watchdog(void *);
328 static void	iwm_parent(struct ieee80211com *);
329 #ifdef IWM_DEBUG
330 static const char *
331 		iwm_desc_lookup(uint32_t);
332 static void	iwm_nic_error(struct iwm_softc *);
333 #endif
334 static void	iwm_notif_intr(struct iwm_softc *);
335 static void	iwm_intr(void *);
336 static int	iwm_attach(device_t);
337 static void	iwm_preinit(void *);
338 static int	iwm_detach_local(struct iwm_softc *sc, int);
339 static void	iwm_init_task(void *);
340 static void	iwm_radiotap_attach(struct iwm_softc *);
341 static struct ieee80211vap *
342 		iwm_vap_create(struct ieee80211com *,
343 		               const char [IFNAMSIZ], int,
344 		               enum ieee80211_opmode, int,
345 		               const uint8_t [IEEE80211_ADDR_LEN],
346 		               const uint8_t [IEEE80211_ADDR_LEN]);
347 static void	iwm_vap_delete(struct ieee80211vap *);
348 static void	iwm_scan_start(struct ieee80211com *);
349 static void	iwm_scan_end(struct ieee80211com *);
350 static void	iwm_update_mcast(struct ieee80211com *);
351 static void	iwm_set_channel(struct ieee80211com *);
352 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
353 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
354 static int	iwm_detach(device_t);
355 
356 /*
357  * Firmware parser.
358  */
359 
360 static int
361 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
362 {
363 	const struct iwm_fw_cscheme_list *l = (const void *)data;
364 
365 	if (dlen < sizeof(*l) ||
366 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
367 		return EINVAL;
368 
369 	/* we don't actually store anything for now, always use s/w crypto */
370 
371 	return 0;
372 }
373 
374 static int
375 iwm_firmware_store_section(struct iwm_softc *sc,
376     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
377 {
378 	struct iwm_fw_sects *fws;
379 	struct iwm_fw_onesect *fwone;
380 
381 	if (type >= IWM_UCODE_TYPE_MAX)
382 		return EINVAL;
383 	if (dlen < sizeof(uint32_t))
384 		return EINVAL;
385 
386 	fws = &sc->sc_fw.fw_sects[type];
387 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
388 		return EINVAL;
389 
390 	fwone = &fws->fw_sect[fws->fw_count];
391 
392 	/* first 32bit are device load offset */
393 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
394 
395 	/* rest is data */
396 	fwone->fws_data = data + sizeof(uint32_t);
397 	fwone->fws_len = dlen - sizeof(uint32_t);
398 
399 	fws->fw_count++;
400 	fws->fw_totlen += fwone->fws_len;
401 
402 	return 0;
403 }
404 
/* iwlwifi: iwl-drv.c */
/*
 * On-wire layout of an IWM_UCODE_TLV_DEF_CALIB section: the ucode image
 * type the calibration applies to, followed by its trigger control bits.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian enum iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
410 
411 static int
412 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
413 {
414 	const struct iwm_tlv_calib_data *def_calib = data;
415 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
416 
417 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
418 		device_printf(sc->sc_dev,
419 		    "Wrong ucode_type %u for default "
420 		    "calibration.\n", ucode_type);
421 		return EINVAL;
422 	}
423 
424 	sc->sc_default_calib[ucode_type].flow_trigger =
425 	    def_calib->calib.flow_trigger;
426 	sc->sc_default_calib[ucode_type].event_trigger =
427 	    def_calib->calib.event_trigger;
428 
429 	return 0;
430 }
431 
/*
 * Release the firmware(9) image held by 'fw' and clear the parsed
 * section table.  fw->fw_status is deliberately left alone so the
 * load/parse state machine in iwm_read_firmware() keeps working.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
440 
441 static int
442 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
443 {
444 	struct iwm_fw_info *fw = &sc->sc_fw;
445 	const struct iwm_tlv_ucode_header *uhdr;
446 	struct iwm_ucode_tlv tlv;
447 	enum iwm_ucode_tlv_type tlv_type;
448 	const struct firmware *fwp;
449 	const uint8_t *data;
450 	int error = 0;
451 	size_t len;
452 
453 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
454 	    ucode_type != IWM_UCODE_TYPE_INIT)
455 		return 0;
456 
457 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
458 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
459 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
460 
461 	if (fw->fw_fp != NULL)
462 		iwm_fw_info_free(fw);
463 
464 	/*
465 	 * Load firmware into driver memory.
466 	 * fw_fp will be set.
467 	 */
468 	IWM_UNLOCK(sc);
469 	fwp = firmware_get(sc->sc_fwname);
470 	IWM_LOCK(sc);
471 	if (fwp == NULL) {
472 		device_printf(sc->sc_dev,
473 		    "could not read firmware %s (error %d)\n",
474 		    sc->sc_fwname, error);
475 		goto out;
476 	}
477 	fw->fw_fp = fwp;
478 
479 	/*
480 	 * Parse firmware contents
481 	 */
482 
483 	uhdr = (const void *)fw->fw_fp->data;
484 	if (*(const uint32_t *)fw->fw_fp->data != 0
485 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
486 		device_printf(sc->sc_dev, "invalid firmware %s\n",
487 		    sc->sc_fwname);
488 		error = EINVAL;
489 		goto out;
490 	}
491 
492 	sc->sc_fwver = le32toh(uhdr->ver);
493 	data = uhdr->data;
494 	len = fw->fw_fp->datasize - sizeof(*uhdr);
495 
496 	while (len >= sizeof(tlv)) {
497 		size_t tlv_len;
498 		const void *tlv_data;
499 
500 		memcpy(&tlv, data, sizeof(tlv));
501 		tlv_len = le32toh(tlv.length);
502 		tlv_type = le32toh(tlv.type);
503 
504 		len -= sizeof(tlv);
505 		data += sizeof(tlv);
506 		tlv_data = data;
507 
508 		if (len < tlv_len) {
509 			device_printf(sc->sc_dev,
510 			    "firmware too short: %zu bytes\n",
511 			    len);
512 			error = EINVAL;
513 			goto parse_out;
514 		}
515 
516 		switch ((int)tlv_type) {
517 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
518 			if (tlv_len < sizeof(uint32_t)) {
519 				device_printf(sc->sc_dev,
520 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
521 				    __func__,
522 				    (int) tlv_len);
523 				error = EINVAL;
524 				goto parse_out;
525 			}
526 			sc->sc_capa_max_probe_len
527 			    = le32toh(*(const uint32_t *)tlv_data);
528 			/* limit it to something sensible */
529 			if (sc->sc_capa_max_probe_len > (1<<16)) {
530 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
531 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
532 				    "ridiculous\n", __func__);
533 				error = EINVAL;
534 				goto parse_out;
535 			}
536 			break;
537 		case IWM_UCODE_TLV_PAN:
538 			if (tlv_len) {
539 				device_printf(sc->sc_dev,
540 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
541 				    __func__,
542 				    (int) tlv_len);
543 				error = EINVAL;
544 				goto parse_out;
545 			}
546 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
547 			break;
548 		case IWM_UCODE_TLV_FLAGS:
549 			if (tlv_len < sizeof(uint32_t)) {
550 				device_printf(sc->sc_dev,
551 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
552 				    __func__,
553 				    (int) tlv_len);
554 				error = EINVAL;
555 				goto parse_out;
556 			}
557 			/*
558 			 * Apparently there can be many flags, but Linux driver
559 			 * parses only the first one, and so do we.
560 			 *
561 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
562 			 * Intentional or a bug?  Observations from
563 			 * current firmware file:
564 			 *  1) TLV_PAN is parsed first
565 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
566 			 * ==> this resets TLV_PAN to itself... hnnnk
567 			 */
568 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
569 			break;
570 		case IWM_UCODE_TLV_CSCHEME:
571 			if ((error = iwm_store_cscheme(sc,
572 			    tlv_data, tlv_len)) != 0) {
573 				device_printf(sc->sc_dev,
574 				    "%s: iwm_store_cscheme(): returned %d\n",
575 				    __func__,
576 				    error);
577 				goto parse_out;
578 			}
579 			break;
580 		case IWM_UCODE_TLV_NUM_OF_CPU:
581 			if (tlv_len != sizeof(uint32_t)) {
582 				device_printf(sc->sc_dev,
583 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
584 				    __func__,
585 				    (int) tlv_len);
586 				error = EINVAL;
587 				goto parse_out;
588 			}
589 			if (le32toh(*(const uint32_t*)tlv_data) != 1) {
590 				device_printf(sc->sc_dev,
591 				    "%s: driver supports "
592 				    "only TLV_NUM_OF_CPU == 1",
593 				    __func__);
594 				error = EINVAL;
595 				goto parse_out;
596 			}
597 			break;
598 		case IWM_UCODE_TLV_SEC_RT:
599 			if ((error = iwm_firmware_store_section(sc,
600 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
601 				device_printf(sc->sc_dev,
602 				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
603 				    __func__,
604 				    error);
605 				goto parse_out;
606 			}
607 			break;
608 		case IWM_UCODE_TLV_SEC_INIT:
609 			if ((error = iwm_firmware_store_section(sc,
610 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
611 				device_printf(sc->sc_dev,
612 				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
613 				    __func__,
614 				    error);
615 				goto parse_out;
616 			}
617 			break;
618 		case IWM_UCODE_TLV_SEC_WOWLAN:
619 			if ((error = iwm_firmware_store_section(sc,
620 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
621 				device_printf(sc->sc_dev,
622 				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
623 				    __func__,
624 				    error);
625 				goto parse_out;
626 			}
627 			break;
628 		case IWM_UCODE_TLV_DEF_CALIB:
629 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
630 				device_printf(sc->sc_dev,
631 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
632 				    __func__,
633 				    (int) tlv_len,
634 				    (int) sizeof(struct iwm_tlv_calib_data));
635 				error = EINVAL;
636 				goto parse_out;
637 			}
638 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
639 				device_printf(sc->sc_dev,
640 				    "%s: iwm_set_default_calib() failed: %d\n",
641 				    __func__,
642 				    error);
643 				goto parse_out;
644 			}
645 			break;
646 		case IWM_UCODE_TLV_PHY_SKU:
647 			if (tlv_len != sizeof(uint32_t)) {
648 				error = EINVAL;
649 				device_printf(sc->sc_dev,
650 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
651 				    __func__,
652 				    (int) tlv_len);
653 				goto parse_out;
654 			}
655 			sc->sc_fw_phy_config =
656 			    le32toh(*(const uint32_t *)tlv_data);
657 			break;
658 
659 		case IWM_UCODE_TLV_API_CHANGES_SET:
660 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
661 			/* ignore, not used by current driver */
662 			break;
663 
664 		default:
665 			device_printf(sc->sc_dev,
666 			    "%s: unknown firmware section %d, abort\n",
667 			    __func__, tlv_type);
668 			error = EINVAL;
669 			goto parse_out;
670 		}
671 
672 		len -= roundup(tlv_len, 4);
673 		data += roundup(tlv_len, 4);
674 	}
675 
676 	KASSERT(error == 0, ("unhandled error"));
677 
678  parse_out:
679 	if (error) {
680 		device_printf(sc->sc_dev, "firmware parse error %d, "
681 		    "section type %d\n", error, tlv_type);
682 	}
683 
684 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
685 		device_printf(sc->sc_dev,
686 		    "device uses unsupported power ops\n");
687 		error = ENOTSUP;
688 	}
689 
690  out:
691 	if (error) {
692 		fw->fw_status = IWM_FW_STATUS_NONE;
693 		if (fw->fw_fp != NULL)
694 			iwm_fw_info_free(fw);
695 	} else
696 		fw->fw_status = IWM_FW_STATUS_DONE;
697 	wakeup(&sc->sc_fw);
698 
699 	return error;
700 }
701 
702 /*
703  * DMA resource routines
704  */
705 
706 static void
707 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
708 {
709         if (error != 0)
710                 return;
711 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
712         *(bus_addr_t *)arg = segs[0].ds_addr;
713 }
714 
715 static int
716 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
717     bus_size_t size, bus_size_t alignment)
718 {
719 	int error;
720 
721 	dma->tag = NULL;
722 	dma->size = size;
723 
724 	error = bus_dma_tag_create(tag, alignment,
725             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
726             1, size, 0, NULL, NULL, &dma->tag);
727         if (error != 0)
728                 goto fail;
729 
730         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
731             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
732         if (error != 0)
733                 goto fail;
734 
735         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
736             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
737         if (error != 0)
738                 goto fail;
739 
740 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
741 
742 	return 0;
743 
744 fail:	iwm_dma_contig_free(dma);
745 	return error;
746 }
747 
/*
 * Tear down a DMA allocation made by iwm_dma_contig_alloc().  Safe to
 * call on a partially initialized 'dma' (the fields are checked and
 * NULLed in strict reverse order of acquisition), so it doubles as the
 * error-path cleanup for the allocator.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			/* Flush pending DMA in both directions first. */
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}

}
768 
769 /* fwmem is used to load firmware onto the card */
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
777 
/* Release the firmware-load DMA buffer. */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
783 
784 /* tx scheduler rings.  not used? */
785 static int
786 iwm_alloc_sched(struct iwm_softc *sc)
787 {
788 	int rv;
789 
790 	/* TX scheduler rings must be aligned on a 1KB boundary. */
791 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
792 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
793 	return rv;
794 }
795 
/* Release the TX scheduler DMA memory. */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
801 
802 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One page, page-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
808 
/* Release the keep-warm page. */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
814 
815 /* interrupt cause table */
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Aligned as required by the ICT base-address register layout. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
822 
/* Release the interrupt cause table. */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
828 
829 static int
830 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
831 {
832 	bus_size_t size;
833 	int i, error;
834 
835 	ring->cur = 0;
836 
837 	/* Allocate RX descriptors (256-byte aligned). */
838 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
839 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
840 	if (error != 0) {
841 		device_printf(sc->sc_dev,
842 		    "could not allocate RX ring DMA memory\n");
843 		goto fail;
844 	}
845 	ring->desc = ring->desc_dma.vaddr;
846 
847 	/* Allocate RX status area (16-byte aligned). */
848 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
849 	    sizeof(*ring->stat), 16);
850 	if (error != 0) {
851 		device_printf(sc->sc_dev,
852 		    "could not allocate RX status DMA memory\n");
853 		goto fail;
854 	}
855 	ring->stat = ring->stat_dma.vaddr;
856 
857         /* Create RX buffer DMA tag. */
858         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
859             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
860             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
861         if (error != 0) {
862                 device_printf(sc->sc_dev,
863                     "%s: could not create RX buf DMA tag, error %d\n",
864                     __func__, error);
865                 goto fail;
866         }
867 
868 	/*
869 	 * Allocate and map RX buffers.
870 	 */
871 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
872 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
873 			goto fail;
874 		}
875 	}
876 	return 0;
877 
878 fail:	iwm_free_rx_ring(sc, ring);
879 	return error;
880 }
881 
882 static void
883 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
884 {
885 
886 	/* XXX print out if we can't lock the NIC? */
887 	if (iwm_nic_lock(sc)) {
888 		/* XXX handle if RX stop doesn't finish? */
889 		(void) iwm_pcie_rx_stop(sc);
890 		iwm_nic_unlock(sc);
891 	}
892 	/* Reset the ring state */
893 	ring->cur = 0;
894 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
895 }
896 
/*
 * Release all RX ring DMA resources: descriptor and status areas,
 * any mbufs still attached to ring slots (after syncing/unloading
 * their maps), the per-slot DMA maps, and finally the buffer tag.
 * Safe to call on a partially-initialized ring (NULL checks below).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Sync before unload so CPU sees final DMA state. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	/* The tag must outlive all maps created from it; destroy it last. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
925 
/*
 * Allocate DMA resources for TX ring 'qid': the TFD descriptor array
 * for all rings, plus (for rings up to and including the command
 * queue) the command buffer area, the data DMA tag, and one DMA map
 * per slot.  On failure everything is released via iwm_free_tx_ring().
 *
 * Returns 0 on success or an errno on failure.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 * (Rings with qid above the command queue get descriptors only.)
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	/* Host command buffers, one iwm_device_cmd per ring slot. */
	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/*
	 * Frame data tag: up to IWM_MAX_SCATTER - 2 segments (two TFD
	 * entries are reserved for the command/scratch area).
	 */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/* Precompute per-slot command/scratch bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: we walked exactly the cmd area we allocated. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
994 
/*
 * Drop all queued frames from a TX ring and reset its state so it can
 * be reused without reallocating DMA resources.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be "full"; clear its bit. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1019 
/*
 * Release all TX ring DMA resources: descriptor and command areas,
 * any mbufs still attached to ring slots, the per-slot DMA maps and
 * the data tag.  Safe on a partially-initialized ring (NULL checks).
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	/* Destroy the tag only after every map made from it is gone. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1048 
1049 /*
1050  * High-level hardware frobbing routines
1051  */
1052 
/*
 * Enable the default interrupt set and remember it in sc_intmask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1059 
/* Re-apply the last interrupt mask saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1065 
/*
 * Mask all device interrupts and acknowledge anything pending in both
 * the main and FH (flow handler) interrupt status registers.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1076 
/*
 * Reset and (re)enable the Interrupt Cause Table: clear the table,
 * point the hardware at it, flag the driver to use ICT mode, and
 * re-enable interrupts.  Interrupts are disabled for the duration.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1099 
1100 /* iwlwifi pcie/trans.c */
1101 
1102 /*
1103  * Since this .. hard-resets things, it's time to actually
1104  * mark the first vap (if any) as having no mac context.
1105  * It's annoying, but since the driver is potentially being
1106  * stop/start'ed whilst active (thanks openbsd port!) we
1107  * have to correctly track this.
1108  */
/*
 * Full hardware shutdown: disable interrupts, stop the TX scheduler
 * and all DMA channels, reset the RX/TX rings, power down the DMA
 * clocks and the APM, then reset the on-board processor.  The RF-kill
 * interrupt is re-armed at the end so rfkill state changes are still
 * seen while the device is down.  The statement order below follows
 * the reference (iwlwifi/iwn) shutdown sequence and matters.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to 200 * 20us = 4ms for channel idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1189 
1190 /* iwlwifi: mvm/ops.c */
1191 static void
1192 iwm_mvm_nic_config(struct iwm_softc *sc)
1193 {
1194 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1195 	uint32_t reg_val = 0;
1196 
1197 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1198 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1199 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1200 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1201 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1202 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1203 
1204 	/* SKU control */
1205 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1206 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1207 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1208 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1209 
1210 	/* radio configuration */
1211 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1212 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1213 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1214 
1215 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1216 
1217 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1218 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1219 	    radio_cfg_step, radio_cfg_dash);
1220 
1221 	/*
1222 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1223 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1224 	 * to lose ownership and not being able to obtain it back.
1225 	 */
1226 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1227 	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1228 	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1229 }
1230 
/*
 * Program the RX DMA engine: stop it, point it at the descriptor and
 * status DMA areas, then re-enable it with 4KB receive buffers.
 * Requires (and takes/releases) the NIC lock; returns EBUSY if the
 * lock cannot be obtained, 0 otherwise.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1289 
/*
 * Program the TX side: deactivate the scheduler, set the "keep warm"
 * page address, and point the hardware at each TX ring's descriptor
 * area.  Returns EBUSY if the NIC lock cannot be taken, 0 otherwise.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1321 
/*
 * Bring the NIC up to the point where firmware can be loaded:
 * APM init, power settings, HW config, then RX and TX engine setup.
 * Returns 0 on success or the errno from the failing step.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1347 
/* Hardware TX FIFO numbers (firmware ABI; value 4 is unused here). */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};

/*
 * Map net80211 access category (WME_AC_*) to hardware TX FIFO.
 * Note the index order is VO, VI, BE, BK — the reverse of the FIFO
 * numbering above.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1362 
/*
 * Activate TX queue 'qid' in the scheduler and bind it to hardware
 * FIFO 'fifo': deactivate, configure chaining/aggregation, reset the
 * read/write pointers, set the scheduler window/frame limits, then
 * mark the queue active.  Silently fails (with a printf) if the NIC
 * lock cannot be taken.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* Chain-building applies to data queues, not the command queue. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	/* No aggregation on this queue. */
	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Finally mark the queue active on the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1410 
/*
 * Finish bringup after the firmware's "alive" notification: verify
 * the scheduler SRAM base, reset the ICT, clear the scheduler state,
 * enable the command queue and all DMA channels.  Returns 0 on
 * success, EBUSY if the NIC lock is unavailable, or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware must report the same scheduler base we computed. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate all TX scheduler FIFOs. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return error;
}
1468 
1469 /*
1470  * NVM read access and content parsing.  We do not support
1471  * external NVM or writing NVM.
1472  * iwlwifi/mvm/nvm.c
1473  */
1474 
1475 /* list of NVM sections we are allowed/need to read */
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};

/* Default NVM size to read per NVM_ACCESS chunk request */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* NVM_ACCESS_CMD op_code values */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
1489 
1490 static int
1491 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1492 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1493 {
1494 	offset = 0;
1495 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1496 		.offset = htole16(offset),
1497 		.length = htole16(length),
1498 		.type = htole16(section),
1499 		.op_code = IWM_NVM_READ_OPCODE,
1500 	};
1501 	struct iwm_nvm_access_resp *nvm_resp;
1502 	struct iwm_rx_packet *pkt;
1503 	struct iwm_host_cmd cmd = {
1504 		.id = IWM_NVM_ACCESS_CMD,
1505 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1506 		    IWM_CMD_SEND_IN_RFKILL,
1507 		.data = { &nvm_access_cmd, },
1508 	};
1509 	int ret, bytes_read, offset_read;
1510 	uint8_t *resp_data;
1511 
1512 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1513 
1514 	ret = iwm_send_cmd(sc, &cmd);
1515 	if (ret)
1516 		return ret;
1517 
1518 	pkt = cmd.resp_pkt;
1519 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1520 		device_printf(sc->sc_dev,
1521 		    "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1522 		    __func__, pkt->hdr.flags);
1523 		ret = EIO;
1524 		goto exit;
1525 	}
1526 
1527 	/* Extract NVM response */
1528 	nvm_resp = (void *)pkt->data;
1529 
1530 	ret = le16toh(nvm_resp->status);
1531 	bytes_read = le16toh(nvm_resp->length);
1532 	offset_read = le16toh(nvm_resp->offset);
1533 	resp_data = nvm_resp->data;
1534 	if (ret) {
1535 		device_printf(sc->sc_dev,
1536 		    "%s: NVM access command failed with status %d\n",
1537 		    __func__, ret);
1538 		ret = EINVAL;
1539 		goto exit;
1540 	}
1541 
1542 	if (offset_read != offset) {
1543 		device_printf(sc->sc_dev,
1544 		    "%s: NVM ACCESS response with invalid offset %d\n",
1545 		    __func__, offset_read);
1546 		ret = EINVAL;
1547 		goto exit;
1548 	}
1549 
1550 	memcpy(data + offset, resp_data, bytes_read);
1551 	*len = bytes_read;
1552 
1553  exit:
1554 	iwm_free_resp(sc, &cmd);
1555 	return ret;
1556 }
1557 
1558 /*
1559  * Reads an NVM section completely.
1560  * NICs prior to 7000 family doesn't have a real NVM, but just read
1561  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1562  * by uCode, we need to manually check in this case that we don't
1563  * overflow and try to read more than the EEPROM size.
1564  * For 7000 family NICs, we supply the maximal size we can read, and
1565  * the uCode fills the response with as much data as we can,
1566  * without overflowing, so no check is needed.
1567  */
/*
 * Reads an NVM section completely.
 * NICs prior to 7000 family doesn't have a real NVM, but just read
 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
 * by uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as we can,
 * without overflowing, so no check is needed.
 *
 * NOTE(review): 'data' is written at increasing offsets but no buffer
 * capacity is passed in; the caller allocates IWM_OTP_LOW_IMAGE_SIZE
 * bytes — confirm device sections can never exceed that.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			device_printf(sc->sc_dev,
			    "Cannot read NVM from section "
			    "%d offset %d, length %d\n",
			    section, *len, length);
			return error;
		}
		*len += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "NVM section %d read completed\n", section);
	return 0;
}
1597 
1598 /*
1599  * BEGIN IWM_NVM_PARSE
1600  */
1601 
1602 /* iwlwifi/iwl-nvm-parse.c */
1603 
1604 /* NVM offsets (in words) definitions */
1605 enum wkp_nvm_offsets {
1606 	/* NVM HW-Section offset (in words) definitions */
1607 	IWM_HW_ADDR = 0x15,
1608 
1609 /* NVM SW-Section offset (in words) definitions */
1610 	IWM_NVM_SW_SECTION = 0x1C0,
1611 	IWM_NVM_VERSION = 0,
1612 	IWM_RADIO_CFG = 1,
1613 	IWM_SKU = 2,
1614 	IWM_N_HW_ADDRS = 3,
1615 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1616 
1617 /* NVM calibration section offset (in words) definitions */
1618 	IWM_NVM_CALIB_SECTION = 0x2B8,
1619 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1620 };
1621 
1622 /* SKU Capabilities (actual values from NVM definition) */
1623 enum nvm_sku_bits {
1624 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1625 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1626 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1627 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1628 };
1629 
1630 /* radio config bits (actual values from NVM definition) */
1631 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1632 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1633 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1634 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1635 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1636 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1637 
1638 #define DEFAULT_MAX_TX_POWER 16
1639 
1640 /**
1641  * enum iwm_nvm_channel_flags - channel flags in NVM
1642  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1643  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1644  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1645  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1646  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1647  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1648  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1649  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1650  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1651  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1652  */
1653 enum iwm_nvm_channel_flags {
1654 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1655 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1656 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1657 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1658 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1659 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1660 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1661 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1662 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1663 };
1664 
1665 /*
1666  * Translate EEPROM flags to net80211.
1667  */
1668 static uint32_t
1669 iwm_eeprom_channel_flags(uint16_t ch_flags)
1670 {
1671 	uint32_t nflags;
1672 
1673 	nflags = 0;
1674 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1675 		nflags |= IEEE80211_CHAN_PASSIVE;
1676 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1677 		nflags |= IEEE80211_CHAN_NOADHOC;
1678 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1679 		nflags |= IEEE80211_CHAN_DFS;
1680 		/* Just in case. */
1681 		nflags |= IEEE80211_CHAN_NOADHOC;
1682 	}
1683 
1684 	return (nflags);
1685 }
1686 
/*
 * Walk NVM channel entries [ch_idx, ch_num) and register each valid
 * channel with net80211 via ieee80211_add_channel(), translating the
 * NVM flags.  Stops early if the channel array fills up.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		ieee = iwm_nvm_channels[ch_idx];

		/* Skip channels this SKU isn't allowed to use. */
		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
1723 
/*
 * net80211 ic_getradiocaps callback: build the channel list from NVM
 * data — 2GHz channels 1-13 (11b/g), channel 14 (11b only), and the
 * 5GHz channels when the SKU enables that band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
	}
}
1751 
/*
 * Parse the raw NVM sections (as little-endian 16-bit word arrays)
 * into sc->sc_nvm: version, radio config, SKU capabilities, antenna
 * masks, crystal calibration, MAC address and channel flags.
 *
 * Returns 0 on success, EINVAL if the antenna masks are empty.
 * All three section pointers must be non-NULL; the caller validates.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately disabled regardless of SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		device_printf(sc->sc_dev,
		    "%s: invalid antennas (0x%x, 0x%x)\n",
		    __func__, data->valid_tx_ant,
		    data->valid_rx_ant);
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): read without le16_to_cpup() unlike the fields
	 * above — presumably intentional (raw words passed to FW), but
	 * confirm on big-endian hosts.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
	    sizeof(data->nvm_ch_flags));
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1808 
1809 /*
1810  * END NVM PARSE
1811  */
1812 
/* One NVM section as read from the device: length + owned data copy. */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};
1817 
1818 static int
1819 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1820 {
1821 	const uint16_t *hw, *sw, *calib;
1822 
1823 	/* Checking for required sections */
1824 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1825 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1826 		device_printf(sc->sc_dev,
1827 		    "%s: Can't parse empty NVM sections\n",
1828 		    __func__);
1829 		return ENOENT;
1830 	}
1831 
1832 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1833 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1834 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1835 	return iwm_parse_nvm_data(sc, hw, sw, calib,
1836 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1837 }
1838 
1839 static int
1840 iwm_nvm_init(struct iwm_softc *sc)
1841 {
1842 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1843 	int i, section, error;
1844 	uint16_t len;
1845 	uint8_t *nvm_buffer, *temp;
1846 
1847 	/* Read From FW NVM */
1848 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1849 	    "%s: Read NVM\n",
1850 	    __func__);
1851 
1852 	/* TODO: find correct NVM max size for a section */
1853 	nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_NOWAIT);
1854 	if (nvm_buffer == NULL)
1855 		return (ENOMEM);
1856 	for (i = 0; i < nitems(nvm_to_read); i++) {
1857 		section = nvm_to_read[i];
1858 		KASSERT(section <= nitems(nvm_sections),
1859 		    ("too many sections"));
1860 
1861 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1862 		if (error)
1863 			break;
1864 
1865 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
1866 		if (temp == NULL) {
1867 			error = ENOMEM;
1868 			break;
1869 		}
1870 		memcpy(temp, nvm_buffer, len);
1871 		nvm_sections[section].data = temp;
1872 		nvm_sections[section].length = len;
1873 	}
1874 	free(nvm_buffer, M_DEVBUF);
1875 	if (error)
1876 		return error;
1877 
1878 	return iwm_parse_nvm_sections(sc, nvm_sections);
1879 }
1880 
1881 /*
1882  * Firmware loading gunk.  This is kind of a weird hybrid between the
1883  * iwn driver and the Linux iwlwifi driver.
1884  */
1885 
/*
 * DMA one firmware section to device address 'dst_addr' using the
 * service channel: copy the bytes into the pre-allocated bounce
 * buffer, program the FH service-channel registers, then sleep until
 * the interrupt handler sets sc_fw_chunk_done (up to 1s per wakeup).
 *
 * Returns 0 on success, EBUSY if the NIC lock is unavailable, or the
 * msleep() error (e.g. EWOULDBLOCK on timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, program source/destination, then kick it. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	while (!sc->sc_fw_chunk_done)
		if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
			break;

	return error;
}
1929 
1930 static int
1931 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1932 {
1933 	struct iwm_fw_sects *fws;
1934 	int error, i, w;
1935 	const void *data;
1936 	uint32_t dlen;
1937 	uint32_t offset;
1938 
1939 	sc->sc_uc.uc_intr = 0;
1940 
1941 	fws = &sc->sc_fw.fw_sects[ucode_type];
1942 	for (i = 0; i < fws->fw_count; i++) {
1943 		data = fws->fw_sect[i].fws_data;
1944 		dlen = fws->fw_sect[i].fws_len;
1945 		offset = fws->fw_sect[i].fws_devoff;
1946 		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
1947 		    "LOAD FIRMWARE type %d offset %u len %d\n",
1948 		    ucode_type, offset, dlen);
1949 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
1950 		if (error) {
1951 			device_printf(sc->sc_dev,
1952 			    "%s: chunk %u of %u returned error %02d\n",
1953 			    __func__, i, fws->fw_count, error);
1954 			return error;
1955 		}
1956 	}
1957 
1958 	/* wait for the firmware to load */
1959 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
1960 
1961 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
1962 		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
1963 	}
1964 
1965 	return error;
1966 }
1967 
/* iwlwifi: pcie/trans.c */
/*
 * Bring the NIC up and boot the requested uCode image: init the nic,
 * clear the rfkill handshake bits, enable host interrupts and then
 * download/start the firmware.  Returns 0 or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupts before (re)initialization. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
1998 
/*
 * Post-"alive" fixups after a successful firmware boot.
 * NOTE(review): sched_base is currently unused; iwm_post_alive()
 * obtains whatever it needs from sc directly -- confirm before
 * removing the parameter.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2004 
2005 static int
2006 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2007 {
2008 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2009 		.valid = htole32(valid_tx_ant),
2010 	};
2011 
2012 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2013 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2014 }
2015 
2016 /* iwlwifi: mvm/fw.c */
2017 static int
2018 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2019 {
2020 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2021 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2022 
2023 	/* Set parameters */
2024 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2025 	phy_cfg_cmd.calib_control.event_trigger =
2026 	    sc->sc_default_calib[ucode_type].event_trigger;
2027 	phy_cfg_cmd.calib_control.flow_trigger =
2028 	    sc->sc_default_calib[ucode_type].flow_trigger;
2029 
2030 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2031 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2032 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2033 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2034 }
2035 
2036 static int
2037 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2038 	enum iwm_ucode_type ucode_type)
2039 {
2040 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2041 	int error;
2042 
2043 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
2044 		return error;
2045 
2046 	sc->sc_uc_current = ucode_type;
2047 	error = iwm_start_fw(sc, ucode_type);
2048 	if (error) {
2049 		sc->sc_uc_current = old_type;
2050 		return error;
2051 	}
2052 
2053 	return iwm_fw_alive(sc, sc->sched_base);
2054 }
2055 
2056 /*
2057  * mvm misc bits
2058  */
2059 
2060 /*
2061  * follows iwlwifi/fw.c
2062  */
/*
 * Boot the INIT uCode image.  With justnvm != 0, only read the NVM
 * (MAC address, capabilities) and allocate the scan command buffer;
 * otherwise also kick off the firmware's internal calibrations and
 * sleep until the init-complete notification arrives.
 *
 * Returns 0 or an errno (EPERM when hardware rfkill is asserted).
 * Called with the softc mutex held (msleep() below uses sc_mtx).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		/*
		 * Size the scan command for the maximum probe request
		 * plus one channel entry per scannable channel.
		 * NOTE(review): if this path can run more than once,
		 * the previous sc_scan_cmd allocation would leak --
		 * TODO confirm single-shot usage.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
		    M_NOWAIT);
		if (sc->sc_scan_cmd == NULL)
			return (ENOMEM);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware (2s timeout per sleep; the interrupt
	 * handler sets sc_init_complete and wakes us).
	 */
	while (!sc->sc_init_complete)
		if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2125 
2126 /*
2127  * receive side
2128  */
2129 
2130 /* (re)stock rx ring, called at init-time and at runtime */
2131 static int
2132 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2133 {
2134 	struct iwm_rx_ring *ring = &sc->rxq;
2135 	struct iwm_rx_data *data = &ring->data[idx];
2136 	struct mbuf *m;
2137 	int error;
2138 	bus_addr_t paddr;
2139 
2140 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2141 	if (m == NULL)
2142 		return ENOBUFS;
2143 
2144 	if (data->m != NULL)
2145 		bus_dmamap_unload(ring->data_dmat, data->map);
2146 
2147 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2148 	error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2149 	if (error != 0) {
2150 		device_printf(sc->sc_dev,
2151 		    "%s: could not create RX buf DMA map, error %d\n",
2152 		    __func__, error);
2153 		goto fail;
2154 	}
2155 	data->m = m;
2156 	error = bus_dmamap_load(ring->data_dmat, data->map,
2157 	    mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2158 	    &paddr, BUS_DMA_NOWAIT);
2159 	if (error != 0 && error != EFBIG) {
2160 		device_printf(sc->sc_dev,
2161 		    "%s: can't not map mbuf, error %d\n", __func__,
2162 		    error);
2163 		goto fail;
2164 	}
2165 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2166 
2167 	/* Update RX descriptor. */
2168 	ring->desc[idx] = htole32(paddr >> 8);
2169 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2170 	    BUS_DMASYNC_PREWRITE);
2171 
2172 	return 0;
2173 fail:
2174 	return error;
2175 }
2176 
2177 /* iwlwifi: mvm/rx.c */
2178 #define IWM_RSSI_OFFSET 50
2179 static int
2180 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2181 {
2182 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2183 	uint32_t agc_a, agc_b;
2184 	uint32_t val;
2185 
2186 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2187 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2188 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2189 
2190 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2191 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2192 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2193 
2194 	/*
2195 	 * dBm = rssi dB - agc dB - constant.
2196 	 * Higher AGC (higher radio gain) means lower signal.
2197 	 */
2198 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2199 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2200 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2201 
2202 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2203 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2204 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2205 
2206 	return max_rssi_dbm;
2207 }
2208 
2209 /* iwlwifi: mvm/rx.c */
2210 /*
2211  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2212  * values are reported by the fw as positive values - need to negate
2213  * to obtain their dBM.  Account for missing antennas by replacing 0
2214  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2215  */
2216 static int
2217 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2218 {
2219 	int energy_a, energy_b, energy_c, max_energy;
2220 	uint32_t val;
2221 
2222 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2223 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2224 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2225 	energy_a = energy_a ? -energy_a : -256;
2226 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2227 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2228 	energy_b = energy_b ? -energy_b : -256;
2229 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2230 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2231 	energy_c = energy_c ? -energy_c : -256;
2232 	max_energy = MAX(energy_a, energy_b);
2233 	max_energy = MAX(max_energy, energy_c);
2234 
2235 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2236 	    "energy In A %d B %d C %d , and max %d\n",
2237 	    energy_a, energy_b, energy_c, max_energy);
2238 
2239 	return max_energy;
2240 }
2241 
/*
 * IWM_RX_PHY_CMD handler: stash the PHY stats that precede an MPDU
 * notification; iwm_mvm_rx_rx_mpdu() reads them from sc_last_phy_info.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	/* Make the DMA'ed packet contents visible to the CPU first. */
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
2253 
2254 /*
2255  * Retrieve the average noise (in dBm) among receivers.
2256  */
2257 static int
2258 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2259 {
2260 	int i, total, nbant, noise;
2261 
2262 	total = nbant = noise = 0;
2263 	for (i = 0; i < 3; i++) {
2264 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2265 		if (noise) {
2266 			total += noise;
2267 			nbant++;
2268 		}
2269 	}
2270 
2271 	/* There should be at least one antenna but check anyway. */
2272 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2273 }
2274 
2275 /*
2276  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2277  *
2278  * Handles the actual data of the Rx packet from the fw
2279  */
2280 static void
2281 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2282 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2283 {
2284 	struct ieee80211com *ic = &sc->sc_ic;
2285 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2286 	struct ieee80211_frame *wh;
2287 	struct ieee80211_node *ni;
2288 	struct ieee80211_rx_stats rxs;
2289 	struct mbuf *m;
2290 	struct iwm_rx_phy_info *phy_info;
2291 	struct iwm_rx_mpdu_res_start *rx_res;
2292 	uint32_t len;
2293 	uint32_t rx_pkt_status;
2294 	int rssi;
2295 
2296 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2297 
2298 	phy_info = &sc->sc_last_phy_info;
2299 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2300 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2301 	len = le16toh(rx_res->byte_count);
2302 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2303 
2304 	m = data->m;
2305 	m->m_data = pkt->data + sizeof(*rx_res);
2306 	m->m_pkthdr.len = m->m_len = len;
2307 
2308 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2309 		device_printf(sc->sc_dev,
2310 		    "dsp size out of range [0,20]: %d\n",
2311 		    phy_info->cfg_phy_cnt);
2312 		return;
2313 	}
2314 
2315 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2316 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2317 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2318 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2319 		return; /* drop */
2320 	}
2321 
2322 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2323 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2324 	} else {
2325 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
2326 	}
2327 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
2328 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
2329 
2330 	/* replenish ring for the buffer we're going to feed to the sharks */
2331 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2332 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2333 		    __func__);
2334 		return;
2335 	}
2336 
2337 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2338 
2339 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2340 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
2341 	    __func__,
2342 	    le16toh(phy_info->channel),
2343 	    le16toh(phy_info->phy_flags));
2344 
2345 	/*
2346 	 * Populate an RX state struct with the provided information.
2347 	 */
2348 	bzero(&rxs, sizeof(rxs));
2349 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2350 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2351 	rxs.c_ieee = le16toh(phy_info->channel);
2352 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2353 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2354 	} else {
2355 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2356 	}
2357 	rxs.rssi = rssi - sc->sc_noise;
2358 	rxs.nf = sc->sc_noise;
2359 
2360 	if (ieee80211_radiotap_active_vap(vap)) {
2361 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2362 
2363 		tap->wr_flags = 0;
2364 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2365 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2366 		tap->wr_chan_freq = htole16(rxs.c_freq);
2367 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2368 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2369 		tap->wr_dbm_antsignal = (int8_t)rssi;
2370 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2371 		tap->wr_tsft = phy_info->system_timestamp;
2372 		switch (phy_info->rate) {
2373 		/* CCK rates. */
2374 		case  10: tap->wr_rate =   2; break;
2375 		case  20: tap->wr_rate =   4; break;
2376 		case  55: tap->wr_rate =  11; break;
2377 		case 110: tap->wr_rate =  22; break;
2378 		/* OFDM rates. */
2379 		case 0xd: tap->wr_rate =  12; break;
2380 		case 0xf: tap->wr_rate =  18; break;
2381 		case 0x5: tap->wr_rate =  24; break;
2382 		case 0x7: tap->wr_rate =  36; break;
2383 		case 0x9: tap->wr_rate =  48; break;
2384 		case 0xb: tap->wr_rate =  72; break;
2385 		case 0x1: tap->wr_rate =  96; break;
2386 		case 0x3: tap->wr_rate = 108; break;
2387 		/* Unknown rate: should not happen. */
2388 		default:  tap->wr_rate =   0;
2389 		}
2390 	}
2391 
2392 	IWM_UNLOCK(sc);
2393 	if (ni != NULL) {
2394 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2395 		ieee80211_input_mimo(ni, m, &rxs);
2396 		ieee80211_free_node(ni);
2397 	} else {
2398 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2399 		ieee80211_input_mimo_all(ic, m, &rxs);
2400 	}
2401 	IWM_LOCK(sc);
2402 }
2403 
2404 static int
2405 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2406 	struct iwm_node *in)
2407 {
2408 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2409 	struct ieee80211_node *ni = &in->in_ni;
2410 	struct ieee80211vap *vap = ni->ni_vap;
2411 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2412 	int failack = tx_resp->failure_frame;
2413 
2414 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2415 
2416 	/* Update rate control statistics. */
2417 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
2418 	    __func__,
2419 	    (int) le16toh(tx_resp->status.status),
2420 	    (int) le16toh(tx_resp->status.sequence),
2421 	    tx_resp->frame_count,
2422 	    tx_resp->bt_kill_count,
2423 	    tx_resp->failure_rts,
2424 	    tx_resp->failure_frame,
2425 	    le32toh(tx_resp->initial_rate),
2426 	    (int) le16toh(tx_resp->wireless_media_time));
2427 
2428 	if (status != IWM_TX_STATUS_SUCCESS &&
2429 	    status != IWM_TX_STATUS_DIRECT_DONE) {
2430 		ieee80211_ratectl_tx_complete(vap, ni,
2431 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2432 		return (1);
2433 	} else {
2434 		ieee80211_ratectl_tx_complete(vap, ni,
2435 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2436 		return (0);
2437 	}
2438 }
2439 
/*
 * IWM_TX_CMD response handler: complete the TX ring slot named by the
 * response header, hand the mbuf back to net80211, and restart the
 * transmit path if the ring drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Firmware responded: reset the watchdog. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before releasing the node/mbuf references. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Frees m and drops the node reference; status: 0 = TX OK. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
2487 
2488 /*
2489  * transmit side
2490  */
2491 
2492 /*
2493  * Process a "command done" firmware notification.  This is where we wakeup
2494  * processes waiting for a synchronous command completion.
2495  * from if_iwn
2496  */
2497 static void
2498 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2499 {
2500 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2501 	struct iwm_tx_data *data;
2502 
2503 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2504 		return;	/* Not a command ack. */
2505 	}
2506 
2507 	data = &ring->data[pkt->hdr.idx];
2508 
2509 	/* If the command was mapped in an mbuf, free it. */
2510 	if (data->m != NULL) {
2511 		bus_dmamap_sync(ring->data_dmat, data->map,
2512 		    BUS_DMASYNC_POSTWRITE);
2513 		bus_dmamap_unload(ring->data_dmat, data->map);
2514 		m_freem(data->m);
2515 		data->m = NULL;
2516 	}
2517 	wakeup(&ring->desc[pkt->hdr.idx]);
2518 }
2519 
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Update the byte-count (scheduler) table entry for TX ring slot
 * (qid, idx) so the firmware scheduler knows the frame length.
 * Currently compiled out; see the matching #if 0 in iwm_tx().
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/*
	 * NOTE(review): the first IWM_TFD_QUEUE_SIZE_BC_DUP entries are
	 * mirrored past the end of the table -- presumably the hardware
	 * reads the wrapped copy; confirm against iwlwifi before relying
	 * on it.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
2552 
2553 /*
2554  * Take an 802.11 (non-n) rate, find the relevant rate
2555  * table entry.  return the index into in_ridx[].
2556  *
2557  * The caller then uses that index back into in_ridx
2558  * to figure out the rate index programmed /into/
2559  * the firmware for this given node.
2560  */
2561 static int
2562 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2563     uint8_t rate)
2564 {
2565 	int i;
2566 	uint8_t r;
2567 
2568 	for (i = 0; i < nitems(in->in_ridx); i++) {
2569 		r = iwm_rates[in->in_ridx[i]].rate;
2570 		if (rate == r)
2571 			return (i);
2572 	}
2573 	/* XXX Return the first */
2574 	/* XXX TODO: have it return the /lowest/ */
2575 	return (0);
2576 }
2577 
2578 /*
2579  * Fill in the rate related information for a transmit command.
2580  */
/*
 * Fill the rate-related fields of tx for the frame wh destined to in.
 * Data frames use the rate-control table programmed into the firmware
 * (via in->in_ridx); everything else uses a fixed lowest rate for the
 * current operating band.  Returns the chosen rate table entry.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		/* Map the ratectl choice back into the programmed table. */
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	/* Combine antenna/modulation flags with the PLCP rate code. */
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
2651 
#define TB0_SIZE 16
/*
 * Queue the frame m to node ni on TX ring ac: build the firmware TX
 * command (rate, flags, station id, 802.11 header copy), DMA-map the
 * payload, fill the TFD with command + payload segments and advance
 * the ring's write pointer.  Consumes m on both success and failure.
 * Returns 0 or an errno.  Called with the softc mutex held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	/* Unicast frames want an ACK from the peer. */
	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): this requests RTS/CTS protection for long
	 * *non-data* unicast frames only -- that looks inverted
	 * compared to the usual rtsthreshold semantics for data
	 * frames; confirm against iwlwifi before changing.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Firmware power-save frame timeouts, in TUs. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%d\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    (int) tx->initial_rate_index
	    );

	/*
	 * Fill TX descriptor.  TB0/TB1 cover the command header, the TX
	 * command and the (padded) copied 802.11 header.
	 */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
2866 
2867 static int
2868 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2869     const struct ieee80211_bpf_params *params)
2870 {
2871 	struct ieee80211com *ic = ni->ni_ic;
2872 	struct iwm_softc *sc = ic->ic_softc;
2873 	int error = 0;
2874 
2875 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2876 	    "->%s begin\n", __func__);
2877 
2878 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
2879 		m_freem(m);
2880 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2881 		    "<-%s not RUNNING\n", __func__);
2882 		return (ENETDOWN);
2883         }
2884 
2885 	IWM_LOCK(sc);
2886 	/* XXX fix this */
2887         if (params == NULL) {
2888 		error = iwm_tx(sc, m, ni, 0);
2889 	} else {
2890 		error = iwm_tx(sc, m, ni, 0);
2891 	}
2892 	sc->sc_tx_timer = 5;
2893 	IWM_UNLOCK(sc);
2894 
2895         return (error);
2896 }
2897 
2898 /*
2899  * mvm/tx.c
2900  */
2901 
2902 #if 0
2903 /*
2904  * Note that there are transports that buffer frames before they reach
2905  * the firmware. This means that after flush_tx_path is called, the
2906  * queue might not be empty. The race-free way to handle this is to:
2907  * 1) set the station as draining
2908  * 2) flush the Tx path
2909  * 3) wait for the transport queues to be empty
2910  */
/*
 * NOTE(review): compiled out by the surrounding "#if 0"; kept for
 * reference until TX-path flushing is wired into the release path.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	/* Synchronous callers wait for the firmware ack; async fire-and-forget. */
	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
                device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
2928 #endif
2929 
2930 /*
2931  * BEGIN mvm/sta.c
2932  */
2933 
2934 static void
2935 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
2936 	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
2937 {
2938 	memset(cmd_v5, 0, sizeof(*cmd_v5));
2939 
2940 	cmd_v5->add_modify = cmd_v6->add_modify;
2941 	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
2942 	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
2943 	IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
2944 	cmd_v5->sta_id = cmd_v6->sta_id;
2945 	cmd_v5->modify_mask = cmd_v6->modify_mask;
2946 	cmd_v5->station_flags = cmd_v6->station_flags;
2947 	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
2948 	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
2949 	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
2950 	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
2951 	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
2952 	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
2953 	cmd_v5->assoc_id = cmd_v6->assoc_id;
2954 	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
2955 	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
2956 }
2957 
2958 static int
2959 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
2960 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
2961 {
2962 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
2963 
2964 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
2965 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
2966 		    sizeof(*cmd), cmd, status);
2967 	}
2968 
2969 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
2970 
2971 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
2972 	    &cmd_v5, status);
2973 }
2974 
2975 /* send station add/update command to firmware */
2976 static int
2977 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
2978 {
2979 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
2980 	int ret;
2981 	uint32_t status;
2982 
2983 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
2984 
2985 	add_sta_cmd.sta_id = IWM_STATION_ID;
2986 	add_sta_cmd.mac_id_n_color
2987 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
2988 	        IWM_DEFAULT_COLOR));
2989 	if (!update) {
2990 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
2991 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
2992 	}
2993 	add_sta_cmd.add_modify = update ? 1 : 0;
2994 	add_sta_cmd.station_flags_msk
2995 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
2996 
2997 	status = IWM_ADD_STA_SUCCESS;
2998 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
2999 	if (ret)
3000 		return ret;
3001 
3002 	switch (status) {
3003 	case IWM_ADD_STA_SUCCESS:
3004 		break;
3005 	default:
3006 		ret = EIO;
3007 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3008 		break;
3009 	}
3010 
3011 	return ret;
3012 }
3013 
/*
 * Add the BSS station to the firmware.  Thin wrapper; mirrors the
 * direct-return style of iwm_mvm_update_sta() instead of the redundant
 * ret/if/return-0 sequence.
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3025 
/*
 * Push updated parameters for an already-added BSS station to the
 * firmware (add_modify = 1).
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3031 
3032 static int
3033 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3034 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3035 {
3036 	struct iwm_mvm_add_sta_cmd_v6 cmd;
3037 	int ret;
3038 	uint32_t status;
3039 
3040 	memset(&cmd, 0, sizeof(cmd));
3041 	cmd.sta_id = sta->sta_id;
3042 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3043 
3044 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3045 
3046 	if (addr)
3047 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3048 
3049 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3050 	if (ret)
3051 		return ret;
3052 
3053 	switch (status) {
3054 	case IWM_ADD_STA_SUCCESS:
3055 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3056 		    "%s: Internal station added.\n", __func__);
3057 		return 0;
3058 	default:
3059 		device_printf(sc->sc_dev,
3060 		    "%s: Add internal station failed, status=0x%x\n",
3061 		    __func__, status);
3062 		ret = EIO;
3063 		break;
3064 	}
3065 	return ret;
3066 }
3067 
3068 static int
3069 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3070 {
3071 	int ret;
3072 
3073 	sc->sc_aux_sta.sta_id = 3;
3074 	sc->sc_aux_sta.tfd_queue_msk = 0;
3075 
3076 	ret = iwm_mvm_add_int_sta_common(sc,
3077 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3078 
3079 	if (ret)
3080 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3081 	return ret;
3082 }
3083 
3084 /*
3085  * END mvm/sta.c
3086  */
3087 
3088 /*
3089  * BEGIN mvm/quota.c
3090  */
3091 
3092 static int
3093 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3094 {
3095 	struct iwm_time_quota_cmd cmd;
3096 	int i, idx, ret, num_active_macs, quota, quota_rem;
3097 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3098 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3099 	uint16_t id;
3100 
3101 	memset(&cmd, 0, sizeof(cmd));
3102 
3103 	/* currently, PHY ID == binding ID */
3104 	if (in) {
3105 		id = in->in_phyctxt->id;
3106 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3107 		colors[id] = in->in_phyctxt->color;
3108 
3109 		if (1)
3110 			n_ifs[id] = 1;
3111 	}
3112 
3113 	/*
3114 	 * The FW's scheduling session consists of
3115 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3116 	 * equally between all the bindings that require quota
3117 	 */
3118 	num_active_macs = 0;
3119 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3120 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3121 		num_active_macs += n_ifs[i];
3122 	}
3123 
3124 	quota = 0;
3125 	quota_rem = 0;
3126 	if (num_active_macs) {
3127 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3128 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3129 	}
3130 
3131 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3132 		if (colors[i] < 0)
3133 			continue;
3134 
3135 		cmd.quotas[idx].id_and_color =
3136 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3137 
3138 		if (n_ifs[i] <= 0) {
3139 			cmd.quotas[idx].quota = htole32(0);
3140 			cmd.quotas[idx].max_duration = htole32(0);
3141 		} else {
3142 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3143 			cmd.quotas[idx].max_duration = htole32(0);
3144 		}
3145 		idx++;
3146 	}
3147 
3148 	/* Give the remainder of the session to the first binding */
3149 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3150 
3151 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3152 	    sizeof(cmd), &cmd);
3153 	if (ret)
3154 		device_printf(sc->sc_dev,
3155 		    "%s: Failed to send quota: %d\n", __func__, ret);
3156 	return ret;
3157 }
3158 
3159 /*
3160  * END mvm/quota.c
3161  */
3162 
3163 /*
3164  * ieee80211 routines
3165  */
3166 
3167 /*
3168  * Change to AUTH state in 80211 state machine.  Roughly matches what
3169  * Linux does in bss_info_changed().
3170  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	/* Not associated yet; iwm_assoc() flips this later. */
	in->in_assoc = 0;

	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		/* Contexts already exist in firmware: update them in place. */
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed update phy ctxt\n", __func__);
			goto out;
		}
		in->in_phyctxt = &sc->sc_phyctxt[0];

		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: binding update cmd\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update sta\n", __func__);
			goto out;
		}
	} else {
		/* First time through: create the MAC/PHY contexts. */
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed add phy ctxt!\n", __func__);
			/* NOTE(review): clobbers the real error with ETIMEDOUT. */
			error = ETIMEDOUT;
			goto out;
		}
		in->in_phyctxt = &sc->sc_phyctxt[0];

		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: binding add cmd\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add sta\n", __func__);
			goto out;
		}
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	/* Drop the reference taken on vap->iv_bss above. */
	ieee80211_free_node(ni);
	return (error);
}
3281 
3282 static int
3283 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3284 {
3285 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
3286 	int error;
3287 
3288 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3289 		device_printf(sc->sc_dev,
3290 		    "%s: failed to update STA\n", __func__);
3291 		return error;
3292 	}
3293 
3294 	in->in_assoc = 1;
3295 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3296 		device_printf(sc->sc_dev,
3297 		    "%s: failed to update MAC\n", __func__);
3298 		return error;
3299 	}
3300 
3301 	return 0;
3302 }
3303 
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* The "proper" teardown sequence, kept for reference (dead code). */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): duplicate rm_sta call; looks unintended. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3361 
3362 static struct ieee80211_node *
3363 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3364 {
3365 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
3366 	    M_NOWAIT | M_ZERO);
3367 }
3368 
/*
 * Build the firmware link-quality (rate selection) command for 'in'
 * from the node's negotiated legacy rate set.  Not 11n aware.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* The firmware LQ table is fixed-size; refuse oversized rate sets. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): in_ridx[i] stays -1 here, yet the
			 * table-build loop below indexes iwm_rates[] with it
			 * unconditionally — confirm this path can't be hit
			 * with a rate the hardware table doesn't contain.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, one per table entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3480 
3481 static int
3482 iwm_media_change(struct ifnet *ifp)
3483 {
3484 	struct ieee80211vap *vap = ifp->if_softc;
3485 	struct ieee80211com *ic = vap->iv_ic;
3486 	struct iwm_softc *sc = ic->ic_softc;
3487 	int error;
3488 
3489 	error = ieee80211_media_change(ifp);
3490 	if (error != ENETRESET)
3491 		return error;
3492 
3493 	IWM_LOCK(sc);
3494 	if (ic->ic_nrunning > 0) {
3495 		iwm_stop(sc);
3496 		iwm_init(sc);
3497 	}
3498 	IWM_UNLOCK(sc);
3499 	return error;
3500 }
3501 
3502 
/*
 * net80211 state-machine hook.  Runs the firmware-side work for each
 * transition, then chains to the saved net80211 handler.  Note the
 * deliberate lock juggling: the net80211 lock is dropped while the
 * driver lock is held, and restored before returning.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);
	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for the rationale. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* iv_newstate() requires the net80211 lock. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the freshly-built link-quality table to the firmware. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
3621 
/*
 * Task callback run when a firmware scan pass finishes.  Chains a 5GHz
 * pass after the 2GHz one when the NVM says 5GHz is supported,
 * otherwise tells net80211 the scan is complete.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	IWM_LOCK(sc);
	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
		done = 0;
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
			device_printf(sc->sc_dev, "could not initiate scan\n");
			done = 1;
		}
	} else {
		done = 1;
	}

	if (done) {
		/* Drop the driver lock across the net80211 callback. */
		IWM_UNLOCK(sc);
		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
		IWM_LOCK(sc);
		sc->sc_scanband = 0;
	}
	IWM_UNLOCK(sc);
}
3655 
/*
 * Bring the NIC from cold to operational: run the INIT firmware image,
 * restart the hardware, load the regular image, then configure
 * antennas, PHY contexts, power and TX queues.  The ordering below is
 * firmware-mandated; do not reorder.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	/* NOTE(review): hardcoded 4 queues — presumably qids 0-3; confirm. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
3726 
3727 /* Allow multicast from our BSSID. */
3728 static int
3729 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3730 {
3731 	struct ieee80211_node *ni = vap->iv_bss;
3732 	struct iwm_mcast_filter_cmd *cmd;
3733 	size_t size;
3734 	int error;
3735 
3736 	size = roundup(sizeof(*cmd), 4);
3737 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
3738 	if (cmd == NULL)
3739 		return ENOMEM;
3740 	cmd->filter_own = 1;
3741 	cmd->port_id = 0;
3742 	cmd->count = 0;
3743 	cmd->pass_all = 1;
3744 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3745 
3746 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3747 	    IWM_CMD_SYNC, size, cmd);
3748 	free(cmd, M_DEVBUF);
3749 
3750 	return (error);
3751 }
3752 
3753 static void
3754 iwm_init(struct iwm_softc *sc)
3755 {
3756 	int error;
3757 
3758 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3759 		return;
3760 	}
3761 	sc->sc_generation++;
3762 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
3763 
3764 	if ((error = iwm_init_hw(sc)) != 0) {
3765 		iwm_stop(sc);
3766 		return;
3767 	}
3768 
3769 	/*
3770  	 * Ok, firmware loaded and we are jogging
3771 	 */
3772 	sc->sc_flags |= IWM_FLAG_HW_INITED;
3773 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3774 }
3775 
3776 static int
3777 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3778 {
3779 	struct iwm_softc *sc;
3780 	int error;
3781 
3782 	sc = ic->ic_softc;
3783 
3784 	IWM_LOCK(sc);
3785 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3786 		IWM_UNLOCK(sc);
3787 		return (ENXIO);
3788 	}
3789 	error = mbufq_enqueue(&sc->sc_snd, m);
3790 	if (error) {
3791 		IWM_UNLOCK(sc);
3792 		return (error);
3793 	}
3794 	iwm_start(sc);
3795 	IWM_UNLOCK(sc);
3796 	return (0);
3797 }
3798 
3799 /*
3800  * Dequeue packets from sendq and call send.
3801  */
3802 static void
3803 iwm_start(struct iwm_softc *sc)
3804 {
3805 	struct ieee80211_node *ni;
3806 	struct mbuf *m;
3807 	int ac = 0;
3808 
3809 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3810 	while (sc->qfullmsk == 0 &&
3811 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3812 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3813 		if (iwm_tx(sc, m, ni, ac) != 0) {
3814 			if_inc_counter(ni->ni_vap->iv_ifp,
3815 			    IFCOUNTER_OERRORS, 1);
3816 			ieee80211_free_node(ni);
3817 			continue;
3818 		}
3819 		sc->sc_tx_timer = 15;
3820 	}
3821 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
3822 }
3823 
3824 static void
3825 iwm_stop(struct iwm_softc *sc)
3826 {
3827 
3828 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
3829 	sc->sc_flags |= IWM_FLAG_STOPPED;
3830 	sc->sc_generation++;
3831 	sc->sc_scanband = 0;
3832 	sc->sc_auth_prot = 0;
3833 	sc->sc_tx_timer = 0;
3834 	iwm_stop_device(sc);
3835 }
3836 
3837 static void
3838 iwm_watchdog(void *arg)
3839 {
3840 	struct iwm_softc *sc = arg;
3841 	struct ieee80211com *ic = &sc->sc_ic;
3842 
3843 	if (sc->sc_tx_timer > 0) {
3844 		if (--sc->sc_tx_timer == 0) {
3845 			device_printf(sc->sc_dev, "device timeout\n");
3846 #ifdef IWM_DEBUG
3847 			iwm_nic_error(sc);
3848 #endif
3849 			ieee80211_restart_all(ic);
3850 			counter_u64_add(ic->ic_oerrors, 1);
3851 			return;
3852 		}
3853 	}
3854 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3855 }
3856 
3857 static void
3858 iwm_parent(struct ieee80211com *ic)
3859 {
3860 	struct iwm_softc *sc = ic->ic_softc;
3861 	int startall = 0;
3862 
3863 	IWM_LOCK(sc);
3864 	if (ic->ic_nrunning > 0) {
3865 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
3866 			iwm_init(sc);
3867 			startall = 1;
3868 		}
3869 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
3870 		iwm_stop(sc);
3871 	IWM_UNLOCK(sc);
3872 	if (startall)
3873 		ieee80211_start_all(ic);
3874 }
3875 
3876 /*
3877  * The interrupt side of things
3878  */
3879 
3880 /*
3881  * error dumping routines are from iwlwifi/mvm/utils.c
3882  */
3883 
3884 /*
3885  * Note: This structure is read from the device with IO accesses,
3886  * and the reading already does the endian conversion. As it is
3887  * read with uint32_t-sized accesses, any members with a different size
3888  * need to be ordered correctly though!
3889  */
/*
 * NOTE(review): member order and sizes must track the firmware's
 * error-table layout exactly (it is read as uint32_t words over IO);
 * never reorder or resize these fields.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
3936 
/*
 * NOTE(review): offsets used when walking the firmware error log —
 * one 32-bit header word, then (apparently) 7-word records; confirm
 * against the firmware log layout.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
3939 
3940 #ifdef IWM_DEBUG
/*
 * Firmware assert-id -> human-readable name table.  The final entry,
 * "ADVANCED_SYSASSERT", is the catch-all returned by iwm_desc_lookup()
 * when no id matches.  'static const': the table is file-local and
 * never written, and the bare global leaked into the kernel namespace.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
3962 
3963 static const char *
3964 iwm_desc_lookup(uint32_t num)
3965 {
3966 	int i;
3967 
3968 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
3969 		if (advanced_lookup[i].num == num)
3970 			return advanced_lookup[i].name;
3971 
3972 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
3973 	return advanced_lookup[i].name;
3974 }
3975 
3976 /*
3977  * Support for dumping the error log seemed like a good idea ...
3978  * but it's mostly hex junk and the only sensible thing is the
3979  * hw/ucode revision (which we know anyway).  Since it's here,
3980  * I'll just leave it in, just in case e.g. the Intel guys want to
3981  * help us decipher some "ADVANCED_SYSASSERT" later.
3982  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	/*
	 * The error-table pointer was reported by the firmware in the
	 * IWM_MVM_ALIVE response; sanity-check it against the SRAM
	 * address window before reading through it.
	 */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000 || base >= 0x80C000) {
		device_printf(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/*
	 * NOTE(review): the length argument appears to be a count of
	 * 32-bit words (hence the division) — confirm against the
	 * iwm_read_mem() contract.
	 */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* valid == 0 means the firmware never logged an error. */
	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/*
	 * Raw register dump; mostly useful for Intel when deciphering
	 * an "ADVANCED_SYSASSERT" (see the comment above this function).
	 */
	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
	device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4048 #endif
4049 
/*
 * Sync an RX buffer for CPU access and point _var_ at the payload that
 * immediately follows the packet header _pkt_.  Both macros implicitly
 * use the 'ring' and 'data' locals of the enclosing scope in
 * iwm_notif_intr().
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Same as SYNC_RESP_STRUCT; the _len_ argument is currently unused. */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at the ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4063 
4064 /*
4065  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4066  * Basic structure from if_iwn
4067  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * closed_rb_num is the firmware's 12-bit write index into the
	 * RX ring; consume entries until our read index catches up.
	 */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Strip bit 7 (firmware-originated marker; see below). */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Dispatch on the firmware notification/response code. */
		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MVM_ALIVE: {
			/*
			 * Firmware boot response: record the SRAM table
			 * pointers it reports and wake the thread
			 * sleeping on sc_uc.
			 */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/*
			 * Copy the response out for the thread waiting in
			 * the synchronous-command path, identified by the
			 * (qid, idx) cookie in sc_wantresp.
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			/* Generic command acknowledgements. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* End-of-scan: finish up in task context. */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			/*
			 * Track the auth-protection time event state and
			 * wake whoever is sleeping on sc_auth_prot.
			 */
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "%s: time event notification auth_prot=%d\n",
				__func__, sc->sc_auth_prot);

			wakeup(&sc->sc_auth_prot);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4308 
/*
 * Main interrupt handler.  Collects the interrupt cause either from
 * the ICT table (when IWM_FLAG_USE_ICT is set) or directly from the
 * CSR registers, then dispatches: firmware error dumps, hardware
 * errors, rfkill, firmware-load completion, and RX processing.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* r2 is only meaningful in the non-ICT path; ICT leaves it 0. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() used where le32toh() is meant;
		 * both are the same byte-swap so behavior is unchanged,
		 * but le32toh() would state the intent correctly.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the slot so it isn't seen again. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		/*
		 * NOTE(review): this path returns without re-enabling
		 * interrupts; presumably the restart re-initializes the
		 * device — confirm.
		 */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
4463 
4464 /*
4465  * Autoconf glue-sniffing
4466  */
/* PCI vendor/device IDs for the adapters this driver supports. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b

/* Device-ID to marketing-name table used by iwm_probe(). */
static const struct iwm_devices {
	uint16_t	device;
	const char	*name;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
4486 
4487 static int
4488 iwm_probe(device_t dev)
4489 {
4490 	int i;
4491 
4492 	for (i = 0; i < nitems(iwm_devices); i++)
4493 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4494 		    pci_get_device(dev) == iwm_devices[i].device) {
4495 			device_set_desc(dev, iwm_devices[i].name);
4496 			return (BUS_PROBE_DEFAULT);
4497 		}
4498 
4499 	return (ENXIO);
4500 }
4501 
4502 static int
4503 iwm_dev_check(device_t dev)
4504 {
4505 	struct iwm_softc *sc;
4506 
4507 	sc = device_get_softc(dev);
4508 
4509 	switch (pci_get_device(dev)) {
4510 	case PCI_PRODUCT_INTEL_WL_3160_1:
4511 	case PCI_PRODUCT_INTEL_WL_3160_2:
4512 		sc->sc_fwname = "iwm3160fw";
4513 		sc->host_interrupt_operation_mode = 1;
4514 		return (0);
4515 	case PCI_PRODUCT_INTEL_WL_7260_1:
4516 	case PCI_PRODUCT_INTEL_WL_7260_2:
4517 		sc->sc_fwname = "iwm7260fw";
4518 		sc->host_interrupt_operation_mode = 1;
4519 		return (0);
4520 	case PCI_PRODUCT_INTEL_WL_7265_1:
4521 	case PCI_PRODUCT_INTEL_WL_7265_2:
4522 		sc->sc_fwname = "iwm7265fw";
4523 		sc->host_interrupt_operation_mode = 0;
4524 		return (0);
4525 	default:
4526 		device_printf(dev, "unknown adapter type\n");
4527 		return ENXIO;
4528 	}
4529 }
4530 
4531 static int
4532 iwm_pci_attach(device_t dev)
4533 {
4534 	struct iwm_softc *sc;
4535 	int count, error, rid;
4536 	uint16_t reg;
4537 
4538 	sc = device_get_softc(dev);
4539 
4540 	/* Clear device-specific "PCI retry timeout" register (41h). */
4541 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4542 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4543 
4544 	/* Enable bus-mastering and hardware bug workaround. */
4545 	pci_enable_busmaster(dev);
4546 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4547 	/* if !MSI */
4548 	if (reg & PCIM_STATUS_INTxSTATE) {
4549 		reg &= ~PCIM_STATUS_INTxSTATE;
4550 	}
4551 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4552 
4553 	rid = PCIR_BAR(0);
4554 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4555 	    RF_ACTIVE);
4556 	if (sc->sc_mem == NULL) {
4557 		device_printf(sc->sc_dev, "can't map mem space\n");
4558 		return (ENXIO);
4559 	}
4560 	sc->sc_st = rman_get_bustag(sc->sc_mem);
4561 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4562 
4563 	/* Install interrupt handler. */
4564 	count = 1;
4565 	rid = 0;
4566 	if (pci_alloc_msi(dev, &count) == 0)
4567 		rid = 1;
4568 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4569 	    (rid != 0 ? 0 : RF_SHAREABLE));
4570 	if (sc->sc_irq == NULL) {
4571 		device_printf(dev, "can't map interrupt\n");
4572 			return (ENXIO);
4573 	}
4574 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4575 	    NULL, iwm_intr, sc, &sc->sc_ih);
4576 	if (sc->sc_ih == NULL) {
4577 		device_printf(dev, "can't establish interrupt");
4578 			return (ENXIO);
4579 	}
4580 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
4581 
4582 	return (0);
4583 }
4584 
4585 static void
4586 iwm_pci_detach(device_t dev)
4587 {
4588 	struct iwm_softc *sc = device_get_softc(dev);
4589 
4590 	if (sc->sc_irq != NULL) {
4591 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4592 		bus_release_resource(dev, SYS_RES_IRQ,
4593 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
4594 		pci_release_msi(dev);
4595         }
4596 	if (sc->sc_mem != NULL)
4597 		bus_release_resource(dev, SYS_RES_MEMORY,
4598 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
4599 }
4600 
4601 
4602 
/*
 * Newbus attach: set up locks, task queue, PCI resources, DMA memory
 * (firmware, keep-warm page, ICT table, scheduler, TX/RX rings), and
 * the net80211 capability fields.  Actual firmware load and net80211
 * attach are deferred to iwm_preinit() via a config intrhook.
 * On any failure, iwm_detach_local() releases what was acquired.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
        }

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* -1 means "no synchronous command response outstanding". */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* PHY contexts start out unreferenced and channel-less. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Finish setup (firmware load, net80211 attach) once interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
4739 
4740 static int
4741 iwm_update_edca(struct ieee80211com *ic)
4742 {
4743 	struct iwm_softc *sc = ic->ic_softc;
4744 
4745 	device_printf(sc->sc_dev, "%s: called\n", __func__);
4746 	return (0);
4747 }
4748 
/*
 * Deferred attach, run from the config intrhook once interrupts are
 * live: start the hardware, run the init firmware once to read NVM
 * data, then attach to net80211 and install the driver's callbacks.
 * On failure, tears the driver down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once, then power the device back down. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
4821 
4822 /*
4823  * Attach the interface to 802.11 radiotap.
4824  */
4825 static void
4826 iwm_radiotap_attach(struct iwm_softc *sc)
4827 {
4828         struct ieee80211com *ic = &sc->sc_ic;
4829 
4830 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4831 	    "->%s begin\n", __func__);
4832         ieee80211_radiotap_attach(ic,
4833             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
4834                 IWM_TX_RADIOTAP_PRESENT,
4835             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
4836                 IWM_RX_RADIOTAP_PRESENT);
4837 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4838 	    "->%s end\n", __func__);
4839 }
4840 
/*
 * net80211 VAP creation hook.  This driver supports exactly one VAP;
 * a second create request fails.  The net80211 state-change handler is
 * saved in ivp->iv_newstate so iwm_newstate() can chain to it.
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
		return NULL;
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;            /* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;	/* save net80211's handler */
	vap->iv_newstate = iwm_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}
4868 
4869 static void
4870 iwm_vap_delete(struct ieee80211vap *vap)
4871 {
4872 	struct iwm_vap *ivp = IWM_VAP(vap);
4873 
4874 	ieee80211_ratectl_deinit(vap);
4875 	ieee80211_vap_detach(vap);
4876 	free(ivp, M_80211_VAP);
4877 }
4878 
/*
 * net80211 scan-start hook: kick off a firmware scan of the 2 GHz
 * band.  sc_scanband being nonzero means a scan is already running.
 * The IWM lock must be dropped before ieee80211_cancel_scan() to
 * respect lock ordering with net80211.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_softc *sc = ic->ic_softc;
	int error;

	if (sc->sc_scanband)
		return;
	IWM_LOCK(sc);
	error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
	if (error) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else
		IWM_UNLOCK(sc);
}
4897 
/*
 * net80211 scan-end hook.  Nothing to do here: scan completion is
 * driven by IWM_SCAN_COMPLETE_NOTIFICATION in iwm_notif_intr(), which
 * schedules sc_es_task.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
}
4902 
/* net80211 multicast-filter update hook; not implemented. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
4907 
/* net80211 set-channel hook; intentionally a no-op for this device. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
4912 
/*
 * net80211 per-channel scan hook; no-op — the firmware performs the
 * whole scan itself (see iwm_mvm_scan_request in iwm_scan_start).
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
4917 
/* net80211 minimum-dwell hook; no-op — firmware controls dwell times. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	return;
}
4923 
/*
 * (Re)initialize the device: stop it, then bring it back up if any
 * interface is running.  The IWM_FLAG_BUSY flag serializes concurrent
 * callers; waiters sleep on sc_flags and are woken when done.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
4940 
4941 static int
4942 iwm_resume(device_t dev)
4943 {
4944 	struct iwm_softc *sc = device_get_softc(dev);
4945 	int do_reinit = 0;
4946 	uint16_t reg;
4947 
4948 	/* Clear device-specific "PCI retry timeout" register (41h). */
4949 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4950 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4951 	iwm_init_task(device_get_softc(dev));
4952 
4953 	IWM_LOCK(sc);
4954 	if (sc->sc_flags & IWM_FLAG_DORESUME) {
4955 		sc->sc_flags &= ~IWM_FLAG_DORESUME;
4956 		do_reinit = 1;
4957 	}
4958 	IWM_UNLOCK(sc);
4959 
4960 	if (do_reinit)
4961 		ieee80211_resume_all(&sc->sc_ic);
4962 
4963 	return 0;
4964 }
4965 
4966 static int
4967 iwm_suspend(device_t dev)
4968 {
4969 	int do_stop = 0;
4970 	struct iwm_softc *sc = device_get_softc(dev);
4971 
4972 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
4973 
4974 	ieee80211_suspend_all(&sc->sc_ic);
4975 
4976 	if (do_stop) {
4977 		IWM_LOCK(sc);
4978 		iwm_stop(sc);
4979 		sc->sc_flags |= IWM_FLAG_DORESUME;
4980 		IWM_UNLOCK(sc);
4981 	}
4982 
4983 	return (0);
4984 }
4985 
4986 static int
4987 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
4988 {
4989 	struct iwm_fw_info *fw = &sc->sc_fw;
4990 	device_t dev = sc->sc_dev;
4991 	int i;
4992 
4993 	if (sc->sc_tq) {
4994 		taskqueue_drain_all(sc->sc_tq);
4995 		taskqueue_free(sc->sc_tq);
4996 	}
4997 	callout_drain(&sc->sc_watchdog_to);
4998 	iwm_stop_device(sc);
4999 	if (do_net80211)
5000 		ieee80211_ifdetach(&sc->sc_ic);
5001 
5002 	/* Free descriptor rings */
5003 	for (i = 0; i < nitems(sc->txq); i++)
5004 		iwm_free_tx_ring(sc, &sc->txq[i]);
5005 
5006 	/* Free firmware */
5007 	if (fw->fw_fp != NULL)
5008 		iwm_fw_info_free(fw);
5009 
5010 	/* Free scheduler */
5011 	iwm_free_sched(sc);
5012 	if (sc->ict_dma.vaddr != NULL)
5013 		iwm_free_ict(sc);
5014 	if (sc->kw_dma.vaddr != NULL)
5015 		iwm_free_kw(sc);
5016 	if (sc->fw_dma.vaddr != NULL)
5017 		iwm_free_fwmem(sc);
5018 
5019 	/* Finished with the hardware - detach things */
5020 	iwm_pci_detach(dev);
5021 
5022 	mbufq_drain(&sc->sc_snd);
5023 	IWM_LOCK_DESTROY(sc);
5024 
5025 	return (0);
5026 }
5027 
5028 static int
5029 iwm_detach(device_t dev)
5030 {
5031 	struct iwm_softc *sc = device_get_softc(dev);
5032 
5033 	return (iwm_detach_local(sc, 1));
5034 }
5035 
5036 static device_method_t iwm_pci_methods[] = {
5037         /* Device interface */
5038         DEVMETHOD(device_probe,         iwm_probe),
5039         DEVMETHOD(device_attach,        iwm_attach),
5040         DEVMETHOD(device_detach,        iwm_detach),
5041         DEVMETHOD(device_suspend,       iwm_suspend),
5042         DEVMETHOD(device_resume,        iwm_resume),
5043 
5044         DEVMETHOD_END
5045 };
5046 
5047 static driver_t iwm_pci_driver = {
5048         "iwm",
5049         iwm_pci_methods,
5050         sizeof (struct iwm_softc)
5051 };
5052 
static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare its module
 * dependencies so the loader pulls them in first. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
5059