xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 55620f43deef5c0eb5b4b0f675de18b30c8d1c2d)
1 /*	$OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126 
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130 
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133 
134 #include <net/bpf.h>
135 
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142 
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147 
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152 
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164 
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167 
/*
 * IEEE 802.11 channel numbers backing the device NVM channel map.
 * The first IWM_NUM_2GHZ_CHANNELS entries are the 2.4 GHz channels;
 * the remainder are 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Count of 2.4 GHz entries at the head of iwm_nvm_channels[] above. */
#define IWM_NUM_2GHZ_CHANNELS	14

/* The table must fit in the fixed-size per-softc channel storage. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
180 
181 /*
182  * XXX For now, there's simply a fixed set of rate table entries
183  * that are populated.
184  */
const struct iwm_rate {
	uint8_t rate;	/* net80211 rate code, units of 500 kbit/s
			 * (e.g. 2 == 1 Mbit/s, matching the PLCP name) */
	uint8_t plcp;	/* corresponding firmware PLCP rate value */
} iwm_rates[] = {
	/* CCK rates (indices IWM_RIDX_CCK .. IWM_RIDX_OFDM-1) */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* OFDM rates (indices IWM_RIDX_OFDM .. IWM_RIDX_MAX) */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index helpers into iwm_rates[]: CCK entries precede OFDM entries. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
207 
208 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
209 static int	iwm_firmware_store_section(struct iwm_softc *,
210                                            enum iwm_ucode_type,
211                                            const uint8_t *, size_t);
212 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
213 static void	iwm_fw_info_free(struct iwm_fw_info *);
214 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
215 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
216 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
217                                      bus_size_t, bus_size_t);
218 static void	iwm_dma_contig_free(struct iwm_dma_info *);
219 static int	iwm_alloc_fwmem(struct iwm_softc *);
220 static void	iwm_free_fwmem(struct iwm_softc *);
221 static int	iwm_alloc_sched(struct iwm_softc *);
222 static void	iwm_free_sched(struct iwm_softc *);
223 static int	iwm_alloc_kw(struct iwm_softc *);
224 static void	iwm_free_kw(struct iwm_softc *);
225 static int	iwm_alloc_ict(struct iwm_softc *);
226 static void	iwm_free_ict(struct iwm_softc *);
227 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
228 static void	iwm_disable_rx_dma(struct iwm_softc *);
229 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
230 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
231 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
232                                   int);
233 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
234 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
235 static void	iwm_enable_interrupts(struct iwm_softc *);
236 static void	iwm_restore_interrupts(struct iwm_softc *);
237 static void	iwm_disable_interrupts(struct iwm_softc *);
238 static void	iwm_ict_reset(struct iwm_softc *);
239 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
240 static void	iwm_stop_device(struct iwm_softc *);
241 static void	iwm_mvm_nic_config(struct iwm_softc *);
242 static int	iwm_nic_rx_init(struct iwm_softc *);
243 static int	iwm_nic_tx_init(struct iwm_softc *);
244 static int	iwm_nic_init(struct iwm_softc *);
245 static void	iwm_enable_txq(struct iwm_softc *, int, int);
246 static int	iwm_post_alive(struct iwm_softc *);
247 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
248                                    uint16_t, uint8_t *, uint16_t *);
249 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
250 				     uint16_t *);
251 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
252 static void	iwm_add_channel_band(struct iwm_softc *,
253 		    struct ieee80211_channel[], int, int *, int, int,
254 		    const uint8_t[]);
255 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
256 		    struct ieee80211_channel[]);
257 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
258 			           const uint16_t *, const uint16_t *, uint8_t,
259 				   uint8_t);
260 struct iwm_nvm_section;
261 static int	iwm_parse_nvm_sections(struct iwm_softc *,
262                                        struct iwm_nvm_section *);
263 static int	iwm_nvm_init(struct iwm_softc *);
264 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
265                                         const uint8_t *, uint32_t);
266 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
267 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
268 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
269 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
270 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
271                                               enum iwm_ucode_type);
272 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
273 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
274 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
275 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
276 					    struct iwm_rx_phy_info *);
277 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
278                                       struct iwm_rx_packet *,
279                                       struct iwm_rx_data *);
280 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
281 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
282                                    struct iwm_rx_data *);
283 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
284                                          struct iwm_rx_packet *,
285 				         struct iwm_node *);
286 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
287                                   struct iwm_rx_data *);
288 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
289 #if 0
290 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
291                                  uint16_t);
292 #endif
293 static const struct iwm_rate *
294 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
295 			struct ieee80211_frame *, struct iwm_tx_cmd *);
296 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
297                        struct ieee80211_node *, int);
298 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
299 			     const struct ieee80211_bpf_params *);
300 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
301 					     struct iwm_mvm_add_sta_cmd_v5 *);
302 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
303 					        struct iwm_mvm_add_sta_cmd_v6 *,
304                                                 int *);
305 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
306                                        int);
307 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
308 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
309 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
310                                            struct iwm_int_sta *,
311 				           const uint8_t *, uint16_t, uint16_t);
312 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
313 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
314 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
315 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
316 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
317 static struct ieee80211_node *
318 		iwm_node_alloc(struct ieee80211vap *,
319 		               const uint8_t[IEEE80211_ADDR_LEN]);
320 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
321 static int	iwm_media_change(struct ifnet *);
322 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
323 static void	iwm_endscan_cb(void *, int);
324 static int	iwm_init_hw(struct iwm_softc *);
325 static void	iwm_init(struct iwm_softc *);
326 static void	iwm_start(struct iwm_softc *);
327 static void	iwm_stop(struct iwm_softc *);
328 static void	iwm_watchdog(void *);
329 static void	iwm_parent(struct ieee80211com *);
330 #ifdef IWM_DEBUG
331 static const char *
332 		iwm_desc_lookup(uint32_t);
333 static void	iwm_nic_error(struct iwm_softc *);
334 #endif
335 static void	iwm_notif_intr(struct iwm_softc *);
336 static void	iwm_intr(void *);
337 static int	iwm_attach(device_t);
338 static void	iwm_preinit(void *);
339 static int	iwm_detach_local(struct iwm_softc *sc, int);
340 static void	iwm_init_task(void *);
341 static void	iwm_radiotap_attach(struct iwm_softc *);
342 static struct ieee80211vap *
343 		iwm_vap_create(struct ieee80211com *,
344 		               const char [IFNAMSIZ], int,
345 		               enum ieee80211_opmode, int,
346 		               const uint8_t [IEEE80211_ADDR_LEN],
347 		               const uint8_t [IEEE80211_ADDR_LEN]);
348 static void	iwm_vap_delete(struct ieee80211vap *);
349 static void	iwm_scan_start(struct ieee80211com *);
350 static void	iwm_scan_end(struct ieee80211com *);
351 static void	iwm_update_mcast(struct ieee80211com *);
352 static void	iwm_set_channel(struct ieee80211com *);
353 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
354 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
355 static int	iwm_detach(device_t);
356 
357 /*
358  * Firmware parser.
359  */
360 
361 static int
362 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
363 {
364 	const struct iwm_fw_cscheme_list *l = (const void *)data;
365 
366 	if (dlen < sizeof(*l) ||
367 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
368 		return EINVAL;
369 
370 	/* we don't actually store anything for now, always use s/w crypto */
371 
372 	return 0;
373 }
374 
375 static int
376 iwm_firmware_store_section(struct iwm_softc *sc,
377     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
378 {
379 	struct iwm_fw_sects *fws;
380 	struct iwm_fw_onesect *fwone;
381 
382 	if (type >= IWM_UCODE_TYPE_MAX)
383 		return EINVAL;
384 	if (dlen < sizeof(uint32_t))
385 		return EINVAL;
386 
387 	fws = &sc->sc_fw.fw_sects[type];
388 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
389 		return EINVAL;
390 
391 	fwone = &fws->fw_sect[fws->fw_count];
392 
393 	/* first 32bit are device load offset */
394 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
395 
396 	/* rest is data */
397 	fwone->fws_data = data + sizeof(uint32_t);
398 	fwone->fws_len = dlen - sizeof(uint32_t);
399 
400 	fws->fw_count++;
401 	fws->fw_totlen += fwone->fws_len;
402 
403 	return 0;
404 }
405 
/*
 * Wire format of an IWM_UCODE_TLV_DEF_CALIB section.
 * (iwlwifi: iwl-drv.c)
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;	/* default calibration triggers */
} __packed;
411 
412 static int
413 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
414 {
415 	const struct iwm_tlv_calib_data *def_calib = data;
416 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
417 
418 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
419 		device_printf(sc->sc_dev,
420 		    "Wrong ucode_type %u for default "
421 		    "calibration.\n", ucode_type);
422 		return EINVAL;
423 	}
424 
425 	sc->sc_default_calib[ucode_type].flow_trigger =
426 	    def_calib->calib.flow_trigger;
427 	sc->sc_default_calib[ucode_type].event_trigger =
428 	    def_calib->calib.event_trigger;
429 
430 	return 0;
431 }
432 
/*
 * Release a firmware image obtained via firmware_get() and clear the
 * parsed section table.  fw->fw_status is deliberately left alone;
 * the caller (iwm_read_firmware) manages the load state machine.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
441 
442 static int
443 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
444 {
445 	struct iwm_fw_info *fw = &sc->sc_fw;
446 	const struct iwm_tlv_ucode_header *uhdr;
447 	struct iwm_ucode_tlv tlv;
448 	enum iwm_ucode_tlv_type tlv_type;
449 	const struct firmware *fwp;
450 	const uint8_t *data;
451 	int error = 0;
452 	size_t len;
453 
454 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
455 	    ucode_type != IWM_UCODE_TYPE_INIT)
456 		return 0;
457 
458 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
459 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
460 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
461 
462 	if (fw->fw_fp != NULL)
463 		iwm_fw_info_free(fw);
464 
465 	/*
466 	 * Load firmware into driver memory.
467 	 * fw_fp will be set.
468 	 */
469 	IWM_UNLOCK(sc);
470 	fwp = firmware_get(sc->sc_fwname);
471 	IWM_LOCK(sc);
472 	if (fwp == NULL) {
473 		device_printf(sc->sc_dev,
474 		    "could not read firmware %s (error %d)\n",
475 		    sc->sc_fwname, error);
476 		goto out;
477 	}
478 	fw->fw_fp = fwp;
479 
480 	/*
481 	 * Parse firmware contents
482 	 */
483 
484 	uhdr = (const void *)fw->fw_fp->data;
485 	if (*(const uint32_t *)fw->fw_fp->data != 0
486 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
487 		device_printf(sc->sc_dev, "invalid firmware %s\n",
488 		    sc->sc_fwname);
489 		error = EINVAL;
490 		goto out;
491 	}
492 
493 	sc->sc_fwver = le32toh(uhdr->ver);
494 	data = uhdr->data;
495 	len = fw->fw_fp->datasize - sizeof(*uhdr);
496 
497 	while (len >= sizeof(tlv)) {
498 		size_t tlv_len;
499 		const void *tlv_data;
500 
501 		memcpy(&tlv, data, sizeof(tlv));
502 		tlv_len = le32toh(tlv.length);
503 		tlv_type = le32toh(tlv.type);
504 
505 		len -= sizeof(tlv);
506 		data += sizeof(tlv);
507 		tlv_data = data;
508 
509 		if (len < tlv_len) {
510 			device_printf(sc->sc_dev,
511 			    "firmware too short: %zu bytes\n",
512 			    len);
513 			error = EINVAL;
514 			goto parse_out;
515 		}
516 
517 		switch ((int)tlv_type) {
518 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
519 			if (tlv_len < sizeof(uint32_t)) {
520 				device_printf(sc->sc_dev,
521 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
522 				    __func__,
523 				    (int) tlv_len);
524 				error = EINVAL;
525 				goto parse_out;
526 			}
527 			sc->sc_capa_max_probe_len
528 			    = le32toh(*(const uint32_t *)tlv_data);
529 			/* limit it to something sensible */
530 			if (sc->sc_capa_max_probe_len > (1<<16)) {
531 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
532 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
533 				    "ridiculous\n", __func__);
534 				error = EINVAL;
535 				goto parse_out;
536 			}
537 			break;
538 		case IWM_UCODE_TLV_PAN:
539 			if (tlv_len) {
540 				device_printf(sc->sc_dev,
541 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
542 				    __func__,
543 				    (int) tlv_len);
544 				error = EINVAL;
545 				goto parse_out;
546 			}
547 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
548 			break;
549 		case IWM_UCODE_TLV_FLAGS:
550 			if (tlv_len < sizeof(uint32_t)) {
551 				device_printf(sc->sc_dev,
552 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
553 				    __func__,
554 				    (int) tlv_len);
555 				error = EINVAL;
556 				goto parse_out;
557 			}
558 			/*
559 			 * Apparently there can be many flags, but Linux driver
560 			 * parses only the first one, and so do we.
561 			 *
562 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
563 			 * Intentional or a bug?  Observations from
564 			 * current firmware file:
565 			 *  1) TLV_PAN is parsed first
566 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
567 			 * ==> this resets TLV_PAN to itself... hnnnk
568 			 */
569 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
570 			break;
571 		case IWM_UCODE_TLV_CSCHEME:
572 			if ((error = iwm_store_cscheme(sc,
573 			    tlv_data, tlv_len)) != 0) {
574 				device_printf(sc->sc_dev,
575 				    "%s: iwm_store_cscheme(): returned %d\n",
576 				    __func__,
577 				    error);
578 				goto parse_out;
579 			}
580 			break;
581 		case IWM_UCODE_TLV_NUM_OF_CPU:
582 			if (tlv_len != sizeof(uint32_t)) {
583 				device_printf(sc->sc_dev,
584 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
585 				    __func__,
586 				    (int) tlv_len);
587 				error = EINVAL;
588 				goto parse_out;
589 			}
590 			if (le32toh(*(const uint32_t*)tlv_data) != 1) {
591 				device_printf(sc->sc_dev,
592 				    "%s: driver supports "
593 				    "only TLV_NUM_OF_CPU == 1",
594 				    __func__);
595 				error = EINVAL;
596 				goto parse_out;
597 			}
598 			break;
599 		case IWM_UCODE_TLV_SEC_RT:
600 			if ((error = iwm_firmware_store_section(sc,
601 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
602 				device_printf(sc->sc_dev,
603 				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
604 				    __func__,
605 				    error);
606 				goto parse_out;
607 			}
608 			break;
609 		case IWM_UCODE_TLV_SEC_INIT:
610 			if ((error = iwm_firmware_store_section(sc,
611 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
612 				device_printf(sc->sc_dev,
613 				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
614 				    __func__,
615 				    error);
616 				goto parse_out;
617 			}
618 			break;
619 		case IWM_UCODE_TLV_SEC_WOWLAN:
620 			if ((error = iwm_firmware_store_section(sc,
621 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
622 				device_printf(sc->sc_dev,
623 				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
624 				    __func__,
625 				    error);
626 				goto parse_out;
627 			}
628 			break;
629 		case IWM_UCODE_TLV_DEF_CALIB:
630 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
631 				device_printf(sc->sc_dev,
632 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
633 				    __func__,
634 				    (int) tlv_len,
635 				    (int) sizeof(struct iwm_tlv_calib_data));
636 				error = EINVAL;
637 				goto parse_out;
638 			}
639 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
640 				device_printf(sc->sc_dev,
641 				    "%s: iwm_set_default_calib() failed: %d\n",
642 				    __func__,
643 				    error);
644 				goto parse_out;
645 			}
646 			break;
647 		case IWM_UCODE_TLV_PHY_SKU:
648 			if (tlv_len != sizeof(uint32_t)) {
649 				error = EINVAL;
650 				device_printf(sc->sc_dev,
651 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
652 				    __func__,
653 				    (int) tlv_len);
654 				goto parse_out;
655 			}
656 			sc->sc_fw_phy_config =
657 			    le32toh(*(const uint32_t *)tlv_data);
658 			break;
659 
660 		case IWM_UCODE_TLV_API_CHANGES_SET:
661 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
662 			/* ignore, not used by current driver */
663 			break;
664 
665 		default:
666 			device_printf(sc->sc_dev,
667 			    "%s: unknown firmware section %d, abort\n",
668 			    __func__, tlv_type);
669 			error = EINVAL;
670 			goto parse_out;
671 		}
672 
673 		len -= roundup(tlv_len, 4);
674 		data += roundup(tlv_len, 4);
675 	}
676 
677 	KASSERT(error == 0, ("unhandled error"));
678 
679  parse_out:
680 	if (error) {
681 		device_printf(sc->sc_dev, "firmware parse error %d, "
682 		    "section type %d\n", error, tlv_type);
683 	}
684 
685 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
686 		device_printf(sc->sc_dev,
687 		    "device uses unsupported power ops\n");
688 		error = ENOTSUP;
689 	}
690 
691  out:
692 	if (error) {
693 		fw->fw_status = IWM_FW_STATUS_NONE;
694 		if (fw->fw_fp != NULL)
695 			iwm_fw_info_free(fw);
696 	} else
697 		fw->fw_status = IWM_FW_STATUS_DONE;
698 	wakeup(&sc->sc_fw);
699 
700 	return error;
701 }
702 
703 /*
704  * DMA resource routines
705  */
706 
707 static void
708 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
709 {
710         if (error != 0)
711                 return;
712 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
713         *(bus_addr_t *)arg = segs[0].ds_addr;
714 }
715 
716 static int
717 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
718     bus_size_t size, bus_size_t alignment)
719 {
720 	int error;
721 
722 	dma->tag = NULL;
723 	dma->size = size;
724 
725 	error = bus_dma_tag_create(tag, alignment,
726             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
727             1, size, 0, NULL, NULL, &dma->tag);
728         if (error != 0)
729                 goto fail;
730 
731         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
732             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
733         if (error != 0)
734                 goto fail;
735 
736         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
737             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
738         if (error != 0)
739                 goto fail;
740 
741 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
742 
743 	return 0;
744 
745 fail:	iwm_dma_contig_free(dma);
746 	return error;
747 }
748 
/*
 * Tear down a DMA area set up by iwm_dma_contig_alloc(): sync, unload
 * and free the memory, then destroy the map and tag.  Safe to call on
 * a partially-initialized iwm_dma_info, which is why it doubles as the
 * failure path of the allocator.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}

}
769 
770 /* fwmem is used to load firmware onto the card */
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
778 
/* Release the firmware-upload DMA area set up by iwm_alloc_fwmem(). */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
784 
785 /* tx scheduler rings.  not used? */
786 static int
787 iwm_alloc_sched(struct iwm_softc *sc)
788 {
789 	int rv;
790 
791 	/* TX scheduler rings must be aligned on a 1KB boundary. */
792 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
793 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
794 	return rv;
795 }
796 
/* Release the TX scheduler DMA area set up by iwm_alloc_sched(). */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
802 
803 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One page, page-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
809 
/* Release the keep-warm page set up by iwm_alloc_kw(). */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
815 
816 /* interrupt cause table */
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment dictated by the ICT physical-address register shift. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
823 
/* Release the interrupt cause table set up by iwm_alloc_ict(). */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
829 
/*
 * Set up one RX ring: descriptor array, status area, per-buffer DMA
 * maps, a spare map for iwm_rx_addbuf(), and an initial mbuf in every
 * slot.  Returns 0 on success; on any failure everything allocated so
 * far is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is a single 32-bit word per ring entry. */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Fill the slot with an mbuf and load its DMA mapping. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
900 
/*
 * Halt the RX DMA engine, if the NIC can be locked for register access.
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{

	/* XXX print out if we can't lock the NIC? */
	if (!iwm_nic_lock(sc))
		return;
	/* XXX handle if RX stop doesn't finish? */
	(void) iwm_pcie_rx_stop(sc);
	iwm_nic_unlock(sc);
}
912 
913 static void
914 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
915 {
916 	/* Reset the ring state */
917 	ring->cur = 0;
918 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
919 }
920 
921 static void
922 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
923 {
924 	int i;
925 
926 	iwm_dma_contig_free(&ring->desc_dma);
927 	iwm_dma_contig_free(&ring->stat_dma);
928 
929 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
930 		struct iwm_rx_data *data = &ring->data[i];
931 
932 		if (data->m != NULL) {
933 			bus_dmamap_sync(ring->data_dmat, data->map,
934 			    BUS_DMASYNC_POSTREAD);
935 			bus_dmamap_unload(ring->data_dmat, data->map);
936 			m_freem(data->m);
937 			data->m = NULL;
938 		}
939 		if (data->map != NULL) {
940 			bus_dmamap_destroy(ring->data_dmat, data->map);
941 			data->map = NULL;
942 		}
943 	}
944 	if (ring->spare_map != NULL) {
945 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
946 		ring->spare_map = NULL;
947 	}
948 	if (ring->data_dmat != NULL) {
949 		bus_dma_tag_destroy(ring->data_dmat);
950 		ring->data_dmat = NULL;
951 	}
952 }
953 
/*
 * Allocate DMA resources for TX ring 'qid': the TFD descriptor array
 * and, for rings up to and including the command queue, the device
 * command area plus a per-slot DMA map for mbuf payloads.
 *
 * Returns 0 on success; on failure everything allocated so far is
 * released via iwm_free_tx_ring() and the error is returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute, for each slot, the bus address of its device command
	 * and of the scratch field inside the embedded TX command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The slots must have consumed exactly the command area allocated. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1022 
1023 static void
1024 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1025 {
1026 	int i;
1027 
1028 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1029 		struct iwm_tx_data *data = &ring->data[i];
1030 
1031 		if (data->m != NULL) {
1032 			bus_dmamap_sync(ring->data_dmat, data->map,
1033 			    BUS_DMASYNC_POSTWRITE);
1034 			bus_dmamap_unload(ring->data_dmat, data->map);
1035 			m_freem(data->m);
1036 			data->m = NULL;
1037 		}
1038 	}
1039 	/* Clear TX descriptors. */
1040 	memset(ring->desc, 0, ring->desc_dma.size);
1041 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1042 	    BUS_DMASYNC_PREWRITE);
1043 	sc->qfullmsk &= ~(1 << ring->qid);
1044 	ring->queued = 0;
1045 	ring->cur = 0;
1046 }
1047 
1048 static void
1049 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1050 {
1051 	int i;
1052 
1053 	iwm_dma_contig_free(&ring->desc_dma);
1054 	iwm_dma_contig_free(&ring->cmd_dma);
1055 
1056 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1057 		struct iwm_tx_data *data = &ring->data[i];
1058 
1059 		if (data->m != NULL) {
1060 			bus_dmamap_sync(ring->data_dmat, data->map,
1061 			    BUS_DMASYNC_POSTWRITE);
1062 			bus_dmamap_unload(ring->data_dmat, data->map);
1063 			m_freem(data->m);
1064 			data->m = NULL;
1065 		}
1066 		if (data->map != NULL) {
1067 			bus_dmamap_destroy(ring->data_dmat, data->map);
1068 			data->map = NULL;
1069 		}
1070 	}
1071 	if (ring->data_dmat != NULL) {
1072 		bus_dma_tag_destroy(ring->data_dmat);
1073 		ring->data_dmat = NULL;
1074 	}
1075 }
1076 
1077 /*
1078  * High-level hardware frobbing routines
1079  */
1080 
/*
 * Enable the standard interrupt set and remember it in sc_intmask so
 * iwm_restore_interrupts() can re-apply the same mask later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1087 
/* Re-apply the interrupt mask last saved in sc->sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1093 
/*
 * Mask all interrupts and acknowledge (clear) any that are pending,
 * both in the main CSR and in the FH interrupt status register.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1104 
/*
 * Re-initialize the ICT (interrupt cause table): clear the table and
 * the read index, program its physical base address into the device,
 * mark the driver as using ICT mode, and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1127 
1128 /* iwlwifi pcie/trans.c */
1129 
1130 /*
1131  * Since this .. hard-resets things, it's time to actually
1132  * mark the first vap (if any) as having no mac context.
1133  * It's annoying, but since the driver is potentially being
1134  * stop/start'ed whilst active (thanks openbsd port!) we
1135  * have to correctly track this.
1136  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to 4ms (200 * 20us) for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1218 
1219 /* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash extracted
 * from sc_hw_rev and the radio type/step/dash extracted from the
 * firmware PHY configuration (sc_fw_phy_config); then apply the early
 * PCIe power-off reset workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio configuration fields from the FW PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1259 
/*
 * Initialize the RX DMA engine: clear the status area, stop and reset
 * the channel-0 pointers, program the descriptor and status base
 * addresses, and enable RX with a 4KB buffer size.  Requires the NIC
 * lock; returns EBUSY if it cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1318 
/*
 * Initialize the TX side of the device: deactivate the scheduler,
 * program the "keep warm" page address, and point each hardware queue
 * at its descriptor ring.  Requires the NIC lock; returns EBUSY if it
 * cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1350 
1351 static int
1352 iwm_nic_init(struct iwm_softc *sc)
1353 {
1354 	int error;
1355 
1356 	iwm_apm_init(sc);
1357 	iwm_set_pwr(sc);
1358 
1359 	iwm_mvm_nic_config(sc);
1360 
1361 	if ((error = iwm_nic_rx_init(sc)) != 0)
1362 		return error;
1363 
1364 	/*
1365 	 * Ditto for TX, from iwn
1366 	 */
1367 	if ((error = iwm_nic_tx_init(sc)) != 0)
1368 		return error;
1369 
1370 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1371 	    "%s: shadow registers enabled\n", __func__);
1372 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1373 
1374 	return 0;
1375 }
1376 
/*
 * TX FIFO assignment table, indexed by access category — presumably the
 * net80211 WME AC order (BE, BK, VI, VO indices); confirm against callers.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1383 
/*
 * Activate scheduler queue 'qid' and bind it to TX FIFO 'fifo':
 * deactivate the queue, reset its write/read pointers, program its
 * scheduler context (window size and frame limit) in SRAM, then mark
 * it active.  Silently bails out if the NIC lock cannot be taken.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* Chaining is only enabled for non-command queues. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset both the hardware write pointer and the scheduler read pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Finally mark the queue active on the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1431 
/*
 * Post-"alive" firmware setup: verify the scheduler SRAM base address,
 * reset the ICT table, clear the TX scheduler context in SRAM, enable
 * the command queue and all FH TX DMA channels, and re-enable
 * L1-Active.  Returns 0 on success, EBUSY if the NIC lock cannot be
 * taken, or EINVAL on a scheduler address mismatch.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate the TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return error;
}
1489 
1490 /*
1491  * NVM read access and content parsing.  We do not support
1492  * external NVM or writing NVM.
1493  * iwlwifi/mvm/nvm.c
1494  */
1495 
/*
 * List of NVM sections we are allowed/need to read; each entry is both
 * a section type for IWM_NVM_ACCESS_CMD and an index into the
 * nvm_sections[] array in iwm_nvm_init().
 */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};
1503 
/* Default chunk size (bytes) requested per NVM access command */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
1510 
1511 static int
1512 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1513 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1514 {
1515 	offset = 0;
1516 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1517 		.offset = htole16(offset),
1518 		.length = htole16(length),
1519 		.type = htole16(section),
1520 		.op_code = IWM_NVM_READ_OPCODE,
1521 	};
1522 	struct iwm_nvm_access_resp *nvm_resp;
1523 	struct iwm_rx_packet *pkt;
1524 	struct iwm_host_cmd cmd = {
1525 		.id = IWM_NVM_ACCESS_CMD,
1526 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1527 		    IWM_CMD_SEND_IN_RFKILL,
1528 		.data = { &nvm_access_cmd, },
1529 	};
1530 	int ret, bytes_read, offset_read;
1531 	uint8_t *resp_data;
1532 
1533 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1534 
1535 	ret = iwm_send_cmd(sc, &cmd);
1536 	if (ret)
1537 		return ret;
1538 
1539 	pkt = cmd.resp_pkt;
1540 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1541 		device_printf(sc->sc_dev,
1542 		    "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1543 		    __func__, pkt->hdr.flags);
1544 		ret = EIO;
1545 		goto exit;
1546 	}
1547 
1548 	/* Extract NVM response */
1549 	nvm_resp = (void *)pkt->data;
1550 
1551 	ret = le16toh(nvm_resp->status);
1552 	bytes_read = le16toh(nvm_resp->length);
1553 	offset_read = le16toh(nvm_resp->offset);
1554 	resp_data = nvm_resp->data;
1555 	if (ret) {
1556 		device_printf(sc->sc_dev,
1557 		    "%s: NVM access command failed with status %d\n",
1558 		    __func__, ret);
1559 		ret = EINVAL;
1560 		goto exit;
1561 	}
1562 
1563 	if (offset_read != offset) {
1564 		device_printf(sc->sc_dev,
1565 		    "%s: NVM ACCESS response with invalid offset %d\n",
1566 		    __func__, offset_read);
1567 		ret = EINVAL;
1568 		goto exit;
1569 	}
1570 
1571 	memcpy(data + offset, resp_data, bytes_read);
1572 	*len = bytes_read;
1573 
1574  exit:
1575 	iwm_free_resp(sc, &cmd);
1576 	return ret;
1577 }
1578 
1579 /*
1580  * Reads an NVM section completely.
1581  * NICs prior to 7000 family doesn't have a real NVM, but just read
1582  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1583  * by uCode, we need to manually check in this case that we don't
1584  * overflow and try to read more than the EEPROM size.
1585  * For 7000 family NICs, we supply the maximal size we can read, and
1586  * the uCode fills the response with as much data as we can,
1587  * without overflowing, so no check is needed.
1588  */
1589 static int
1590 iwm_nvm_read_section(struct iwm_softc *sc,
1591 	uint16_t section, uint8_t *data, uint16_t *len)
1592 {
1593 	uint16_t length, seglen;
1594 	int error;
1595 
1596 	/* Set nvm section read length */
1597 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1598 	*len = 0;
1599 
1600 	/* Read the NVM until exhausted (reading less than requested) */
1601 	while (seglen == length) {
1602 		error = iwm_nvm_read_chunk(sc,
1603 		    section, *len, length, data, &seglen);
1604 		if (error) {
1605 			device_printf(sc->sc_dev,
1606 			    "Cannot read NVM from section "
1607 			    "%d offset %d, length %d\n",
1608 			    section, *len, length);
1609 			return error;
1610 		}
1611 		*len += seglen;
1612 	}
1613 
1614 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1615 	    "NVM section %d read completed\n", section);
1616 	return 0;
1617 }
1618 
1619 /*
1620  * BEGIN IWM_NVM_PARSE
1621  */
1622 
1623 /* iwlwifi/iwl-nvm-parse.c */
1624 
1625 /* NVM offsets (in words) definitions */
/* All offsets below are in 16-bit words relative to their section start. */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1642 
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1650 
1651 /* radio config bits (actual values from NVM definition) */
1652 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1653 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1654 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1655 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1656 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1657 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1658 
1659 #define DEFAULT_MAX_TX_POWER 16
1660 
1661 /**
1662  * enum iwm_nvm_channel_flags - channel flags in NVM
1663  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1664  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1665  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1666  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1667  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1668  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1669  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1670  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1671  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1672  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1673  */
/* Channel flag bits as stored in the NVM channel table (see above). */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1685 
1686 /*
1687  * Translate EEPROM flags to net80211.
1688  */
1689 static uint32_t
1690 iwm_eeprom_channel_flags(uint16_t ch_flags)
1691 {
1692 	uint32_t nflags;
1693 
1694 	nflags = 0;
1695 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1696 		nflags |= IEEE80211_CHAN_PASSIVE;
1697 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1698 		nflags |= IEEE80211_CHAN_NOADHOC;
1699 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1700 		nflags |= IEEE80211_CHAN_DFS;
1701 		/* Just in case. */
1702 		nflags |= IEEE80211_CHAN_NOADHOC;
1703 	}
1704 
1705 	return (nflags);
1706 }
1707 
/*
 * Add the NVM channels with indices [ch_idx, ch_num) to 'chans',
 * translating their NVM flags to net80211 flags.  Channels lacking the
 * VALID flag are skipped; iteration stops early if
 * ieee80211_add_channel() fails (e.g. the channel array is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		ieee = iwm_nvm_channels[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
1744 
/*
 * net80211 callback: build the device channel list from the parsed NVM
 * data.  2 GHz channels 1-13 are added as 11b/11g, channel 14 as 11b
 * only, and the 5 GHz channels as 11a when the SKU enables that band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
	}
}
1772 
/*
 * Parse the HW, SW and calibration NVM sections into sc->sc_nvm.
 *
 * nvm_hw/nvm_sw/nvm_calib point to the raw little-endian 16-bit word
 * arrays of the corresponding sections; the wkp_nvm_offsets enum values
 * are word offsets into them.  tx_chains/rx_chains are not referenced
 * here.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio configuration word into its bit fields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n support is forced off here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): the xtal calibration words are copied without
	 * le16toh conversion, unlike the other fields read here —
	 * confirm whether the consumer expects raw little-endian values.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
	    sizeof(data->nvm_ch_flags));
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1819 
1820 /*
1821  * END NVM PARSE
1822  */
1823 
/* In-memory copy of one NVM section; 'data' is malloc'ed by iwm_nvm_init(). */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
1828 
1829 static int
1830 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1831 {
1832 	const uint16_t *hw, *sw, *calib;
1833 
1834 	/* Checking for required sections */
1835 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1836 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1837 		device_printf(sc->sc_dev,
1838 		    "%s: Can't parse empty NVM sections\n",
1839 		    __func__);
1840 		return ENOENT;
1841 	}
1842 
1843 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1844 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1845 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1846 	return iwm_parse_nvm_data(sc, hw, sw, calib,
1847 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1848 }
1849 
1850 static int
1851 iwm_nvm_init(struct iwm_softc *sc)
1852 {
1853 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1854 	int i, section, error;
1855 	uint16_t len;
1856 	uint8_t *nvm_buffer, *temp;
1857 
1858 	/* Read From FW NVM */
1859 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1860 	    "%s: Read NVM\n",
1861 	    __func__);
1862 
1863 	memset(nvm_sections, 0, sizeof(nvm_sections));
1864 
1865 	/* TODO: find correct NVM max size for a section */
1866 	nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_NOWAIT);
1867 	if (nvm_buffer == NULL)
1868 		return (ENOMEM);
1869 	for (i = 0; i < nitems(nvm_to_read); i++) {
1870 		section = nvm_to_read[i];
1871 		KASSERT(section <= nitems(nvm_sections),
1872 		    ("too many sections"));
1873 
1874 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1875 		if (error)
1876 			break;
1877 
1878 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
1879 		if (temp == NULL) {
1880 			error = ENOMEM;
1881 			break;
1882 		}
1883 		memcpy(temp, nvm_buffer, len);
1884 		nvm_sections[section].data = temp;
1885 		nvm_sections[section].length = len;
1886 	}
1887 	free(nvm_buffer, M_DEVBUF);
1888 	if (error == 0)
1889 		error = iwm_parse_nvm_sections(sc, nvm_sections);
1890 
1891 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
1892 		if (nvm_sections[i].data != NULL)
1893 			free(nvm_sections[i].data, M_DEVBUF);
1894 	}
1895 
1896 	return error;
1897 }
1898 
1899 /*
1900  * Firmware loading gunk.  This is kind of a weird hybrid between the
1901  * iwn driver and the Linux iwlwifi driver.
1902  */
1903 
/*
 * DMA one firmware section to device memory at dst_addr over the
 * service (FH) channel, then sleep until the interrupt handler sets
 * sc_fw_chunk_done (one-second msleep timeout per wait).  Returns 0 on
 * success, EBUSY if the NIC lock cannot be taken, or the msleep errno.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel while the transfer is programmed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	/* Source: low bits of the host DMA buffer address ... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	/* ... high bits plus the byte count of this chunk. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	/* Single TB, mark the TFD valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* Kick the channel; the device now pulls the chunk. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/*
	 * NOTE(review): msleep() below implies sc_mtx is held here, so
	 * the interrupt handler cannot set sc_fw_chunk_done before we
	 * sleep and 'error' is always assigned — confirm all callers
	 * hold the lock.
	 */
	/* wait 1s for this segment to load */
	while (!sc->sc_fw_chunk_done)
		if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
			break;

	return error;
}
1947 
1948 static int
1949 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1950 {
1951 	struct iwm_fw_sects *fws;
1952 	int error, i, w;
1953 	const void *data;
1954 	uint32_t dlen;
1955 	uint32_t offset;
1956 
1957 	sc->sc_uc.uc_intr = 0;
1958 
1959 	fws = &sc->sc_fw.fw_sects[ucode_type];
1960 	for (i = 0; i < fws->fw_count; i++) {
1961 		data = fws->fw_sect[i].fws_data;
1962 		dlen = fws->fw_sect[i].fws_len;
1963 		offset = fws->fw_sect[i].fws_devoff;
1964 		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
1965 		    "LOAD FIRMWARE type %d offset %u len %d\n",
1966 		    ucode_type, offset, dlen);
1967 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
1968 		if (error) {
1969 			device_printf(sc->sc_dev,
1970 			    "%s: chunk %u of %u returned error %02d\n",
1971 			    __func__, i, fws->fw_count, error);
1972 			return error;
1973 		}
1974 	}
1975 
1976 	/* wait for the firmware to load */
1977 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
1978 
1979 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
1980 		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
1981 	}
1982 
1983 	return error;
1984 }
1985 
1986 /* iwlwifi: pcie/trans.c */
/* iwlwifi: pcie/trans.c */
/*
 * Bring up the NIC, clear the rfkill handshake bits, enable host
 * interrupts and load the requested ucode image.  Returns 0 or an
 * errno from iwm_nic_init()/iwm_load_firmware().
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupt causes before init. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2016 
2017 static int
2018 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2019 {
2020 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2021 		.valid = htole32(valid_tx_ant),
2022 	};
2023 
2024 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2025 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2026 }
2027 
2028 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration plus the default calibration triggers for
 * the currently selected ucode image.  Synchronous; returns 0 or an
 * errno from the command path.
 *
 * NOTE(review): only phy_cfg and calib_control are initialized here;
 * if struct iwm_phy_cfg_cmd has any other members or padding, they are
 * sent as uninitialized stack bytes — confirm against the struct
 * definition.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2047 
2048 static int
2049 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2050 	enum iwm_ucode_type ucode_type)
2051 {
2052 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2053 	int error;
2054 
2055 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
2056 		return error;
2057 
2058 	sc->sc_uc_current = ucode_type;
2059 	error = iwm_start_fw(sc, ucode_type);
2060 	if (error) {
2061 		sc->sc_uc_current = old_type;
2062 		return error;
2063 	}
2064 
2065 	return iwm_post_alive(sc);
2066 }
2067 
2068 /*
2069  * mvm misc bits
2070  */
2071 
2072 /*
2073  * follows iwlwifi/fw.c
2074  */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Boot the INIT image; calibrations require it to be running. */
	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		device_printf(sc->sc_dev, "failed to load init firmware\n");
		return error;
	}

	if (justnvm) {
		/* Caller only wants the NVM (MAC address etc.), not calib. */
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		/*
		 * Size the scan command buffer for the worst case now
		 * that sc_capa_max_probe_len is known.
		 * NOTE(review): any previously allocated sc_scan_cmd is
		 * not freed here — confirm callers pass justnvm != 0
		 * only once per attach.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
		    M_NOWAIT);
		if (sc->sc_scan_cmd == NULL)
			return (ENOMEM);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	* Send phy configurations command to init uCode
	* to start the 16.0 uCode init image internal calibrations.
	*/
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2139 
2140 /*
2141  * receive side
2142  */
2143 
2144 /* (re)stock rx ring, called at init-time and at runtime */
/* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a fresh jumbo cluster mbuf, DMA-map it via the ring's spare
 * map, swap the spare with the slot's map, and point RX descriptor
 * 'idx' at the new buffer.  Returns 0 or an errno; on failure the ring
 * slot is left untouched.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	int error;
	bus_addr_t paddr;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/* iwm_dma_map_addr stores the single segment address in paddr. */
	error = bus_dmamap_load(ring->data_dmat, ring->spare_map,
	    mtod(m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
	    &paddr, BUS_DMA_NOWAIT);
	/*
	 * NOTE(review): EFBIG is deliberately not treated as fatal
	 * here, but if it occurs, the callback may not have set paddr
	 * to a valid single-segment address — confirm against
	 * iwm_dma_map_addr()'s contract.
	 */
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	/* Release the old buffer's mapping (the mbuf itself is gone). */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware wants the 256-byte-aligned address, hence >> 8. */
	ring->desc[idx] = htole32(paddr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_free(m);
	return error;
}
2190 
2191 /* iwlwifi: mvm/rx.c */
2192 #define IWM_RSSI_OFFSET 50
2193 static int
2194 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2195 {
2196 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2197 	uint32_t agc_a, agc_b;
2198 	uint32_t val;
2199 
2200 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2201 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2202 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2203 
2204 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2205 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2206 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2207 
2208 	/*
2209 	 * dBm = rssi dB - agc dB - constant.
2210 	 * Higher AGC (higher radio gain) means lower signal.
2211 	 */
2212 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2213 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2214 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2215 
2216 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2217 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2218 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2219 
2220 	return max_rssi_dbm;
2221 }
2222 
2223 /* iwlwifi: mvm/rx.c */
2224 /*
2225  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2226  * values are reported by the fw as positive values - need to negate
2227  * to obtain their dBM.  Account for missing antennas by replacing 0
2228  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2229  */
2230 static int
2231 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2232 {
2233 	int energy_a, energy_b, energy_c, max_energy;
2234 	uint32_t val;
2235 
2236 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2237 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2238 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2239 	energy_a = energy_a ? -energy_a : -256;
2240 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2241 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2242 	energy_b = energy_b ? -energy_b : -256;
2243 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2244 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2245 	energy_c = energy_c ? -energy_c : -256;
2246 	max_energy = MAX(energy_a, energy_b);
2247 	max_energy = MAX(max_energy, energy_c);
2248 
2249 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2250 	    "energy In A %d B %d C %d , and max %d\n",
2251 	    energy_a, energy_b, energy_c, max_energy);
2252 
2253 	return max_energy;
2254 }
2255 
/*
 * Handle an RX PHY notification: stash the PHY info so the MPDU
 * handler that follows (iwm_mvm_rx_rx_mpdu) can use it for signal
 * strength and radiotap reporting.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
2267 
2268 /*
2269  * Retrieve the average noise (in dBm) among receivers.
2270  */
2271 static int
2272 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2273 {
2274 	int i, total, nbant, noise;
2275 
2276 	total = nbant = noise = 0;
2277 	for (i = 0; i < 3; i++) {
2278 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2279 		if (noise) {
2280 			total += noise;
2281 			nbant++;
2282 		}
2283 	}
2284 
2285 	/* There should be at least one antenna but check anyway. */
2286 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2287 }
2288 
2289 /*
2290  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2291  *
2292  * Handles the actual data of the Rx packet from the fw
2293  */
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_rx_stats rxs;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* PHY info was saved by the preceding RX_PHY notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The 32-bit status word immediately follows the frame data. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	/* Re-point the ring's mbuf at the 802.11 frame payload. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		device_printf(sc->sc_dev,
		    "dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		return; /* drop */
	}

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
		    __func__);
		return;
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: phy_info: channel=%d, flags=0x%08x\n",
	    __func__,
	    le16toh(phy_info->channel),
	    le16toh(phy_info->phy_flags));

	/*
	 * Populate an RX state struct with the provided information.
	 */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.c_ieee = le16toh(phy_info->channel);
	/*
	 * NOTE(review): this byte-swaps the result of masking a
	 * little-endian field with a host-order constant; on a
	 * big-endian host it would test the wrong bit.  It probably
	 * should read le16toh(phy_info->phy_flags) &
	 * IWM_RX_RES_PHY_FLAGS_BAND_24 — confirm before changing.
	 */
	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
	} else {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
	}
	rxs.rssi = rssi - sc->sc_noise;
	rxs.nf = sc->sc_noise;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq = htole16(rxs.c_freq);
		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map the firmware rate value to a radiotap 500kbps rate. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}
	}

	/* Drop the driver lock across the net80211 input path. */
	IWM_UNLOCK(sc);
	if (ni != NULL) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
		ieee80211_input_mimo(ni, m, &rxs);
		ieee80211_free_node(ni);
	} else {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
		ieee80211_input_mimo_all(ic, m, &rxs);
	}
	IWM_LOCK(sc);
}
2417 
2418 static int
2419 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2420 	struct iwm_node *in)
2421 {
2422 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2423 	struct ieee80211_node *ni = &in->in_ni;
2424 	struct ieee80211vap *vap = ni->ni_vap;
2425 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2426 	int failack = tx_resp->failure_frame;
2427 
2428 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2429 
2430 	/* Update rate control statistics. */
2431 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
2432 	    __func__,
2433 	    (int) le16toh(tx_resp->status.status),
2434 	    (int) le16toh(tx_resp->status.sequence),
2435 	    tx_resp->frame_count,
2436 	    tx_resp->bt_kill_count,
2437 	    tx_resp->failure_rts,
2438 	    tx_resp->failure_frame,
2439 	    le32toh(tx_resp->initial_rate),
2440 	    (int) le16toh(tx_resp->wireless_media_time));
2441 
2442 	if (status != IWM_TX_STATUS_SUCCESS &&
2443 	    status != IWM_TX_STATUS_DIRECT_DONE) {
2444 		ieee80211_ratectl_tx_complete(vap, ni,
2445 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2446 		return (1);
2447 	} else {
2448 		ieee80211_ratectl_tx_complete(vap, ni,
2449 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2450 		return (0);
2451 	}
2452 }
2453 
/*
 * TX-done handler: locate the ring slot named by the response header,
 * report the outcome to net80211/rate control, release the slot's DMA
 * mapping and mbuf, and restart transmission if the ring drained below
 * the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Completion seen: reset the watchdog countdown. */
	sc->sc_tx_timer = 0;

	/* status: 0 = success, 1 = failure (see tx_cmd_single). */
	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing the mbuf/node refs back. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Frees/accounts the mbuf and releases the node reference. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
2501 
2502 /*
2503  * transmit side
2504  */
2505 
2506 /*
2507  * Process a "command done" firmware notification.  This is where we wakeup
2508  * processes waiting for a synchronous command completion.
2509  * from if_iwn
2510  */
2511 static void
2512 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2513 {
2514 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2515 	struct iwm_tx_data *data;
2516 
2517 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2518 		return;	/* Not a command ack. */
2519 	}
2520 
2521 	data = &ring->data[pkt->hdr.idx];
2522 
2523 	/* If the command was mapped in an mbuf, free it. */
2524 	if (data->m != NULL) {
2525 		bus_dmamap_sync(ring->data_dmat, data->map,
2526 		    BUS_DMASYNC_POSTWRITE);
2527 		bus_dmamap_unload(ring->data_dmat, data->map);
2528 		m_freem(data->m);
2529 		data->m = NULL;
2530 	}
2531 	wakeup(&ring->desc[pkt->hdr.idx]);
2532 }
2533 
2534 #if 0
2535 /*
2536  * necessary only for block ack mode
2537  */
/*
 * Write the frame length (and station id) for ring slot 'idx' into the
 * byte-count table the TX scheduler reads.  Currently compiled out
 * (#if 0); only needed for block-ack mode.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry format: station id in the top 4 bits, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/*
	 * NOTE(review): presumably a duplicate entry required by the
	 * hardware for the first IWM_TFD_QUEUE_SIZE_BC_DUP slots —
	 * confirm against the iwlwifi reference before relying on it.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
2565 #endif
2566 
2567 /*
2568  * Take an 802.11 (non-n) rate, find the relevant rate
2569  * table entry.  return the index into in_ridx[].
2570  *
2571  * The caller then uses that index back into in_ridx
2572  * to figure out the rate index programmed /into/
2573  * the firmware for this given node.
2574  */
2575 static int
2576 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2577     uint8_t rate)
2578 {
2579 	int i;
2580 	uint8_t r;
2581 
2582 	for (i = 0; i < nitems(in->in_ridx); i++) {
2583 		r = iwm_rates[in->in_ridx[i]].rate;
2584 		if (rate == r)
2585 			return (i);
2586 	}
2587 	/* XXX Return the first */
2588 	/* XXX TODO: have it return the /lowest/ */
2589 	return (0);
2590 }
2591 
2592 /*
2593  * Fill in the rate related information for a transmit command.
2594  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Let the firmware's rate-scaling take over from here. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
2665 
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on TX ring 'ac': build the firmware
 * TX command (header + rate info), DMA-map the payload, fill the TFD
 * with command + header + payload segments, and kick the ring's write
 * pointer.  Consumes the mbuf on both success and failure.  Returns 0
 * or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select the transmit rate; also sets rate fields inside 'tx'. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels inside the command; only payload is mapped. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%d\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    (int) tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command+header, the rest carry the payload. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
2880 
2881 static int
2882 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2883     const struct ieee80211_bpf_params *params)
2884 {
2885 	struct ieee80211com *ic = ni->ni_ic;
2886 	struct iwm_softc *sc = ic->ic_softc;
2887 	int error = 0;
2888 
2889 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2890 	    "->%s begin\n", __func__);
2891 
2892 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
2893 		m_freem(m);
2894 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2895 		    "<-%s not RUNNING\n", __func__);
2896 		return (ENETDOWN);
2897         }
2898 
2899 	IWM_LOCK(sc);
2900 	/* XXX fix this */
2901         if (params == NULL) {
2902 		error = iwm_tx(sc, m, ni, 0);
2903 	} else {
2904 		error = iwm_tx(sc, m, ni, 0);
2905 	}
2906 	sc->sc_tx_timer = 5;
2907 	IWM_UNLOCK(sc);
2908 
2909         return (error);
2910 }
2911 
2912 /*
2913  * mvm/tx.c
2914  */
2915 
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
/*
 * Ask the firmware to flush the TX path for the queues selected by
 * 'tfd_msk'.  'sync' chooses a synchronous vs. asynchronous host
 * command.  Currently compiled out; see the commented-out call site
 * in iwm_release().
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
                device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
2943 
2944 /*
2945  * BEGIN mvm/sta.c
2946  */
2947 
/*
 * Convert a version-6 ADD_STA command into the version-5 layout for
 * firmware that lacks IWM_UCODE_TLV_FLAGS_STA_KEY_CMD (see
 * iwm_mvm_send_add_sta_cmd_status()).  All of cmd_v5 is zeroed first,
 * then the fields common to both layouts are copied across.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
2971 
2972 static int
2973 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
2974 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
2975 {
2976 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
2977 
2978 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
2979 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
2980 		    sizeof(*cmd), cmd, status);
2981 	}
2982 
2983 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
2984 
2985 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
2986 	    &cmd_v5, status);
2987 }
2988 
2989 /* send station add/update command to firmware */
2990 static int
2991 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
2992 {
2993 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
2994 	int ret;
2995 	uint32_t status;
2996 
2997 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
2998 
2999 	add_sta_cmd.sta_id = IWM_STATION_ID;
3000 	add_sta_cmd.mac_id_n_color
3001 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3002 	        IWM_DEFAULT_COLOR));
3003 	if (!update) {
3004 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
3005 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3006 	}
3007 	add_sta_cmd.add_modify = update ? 1 : 0;
3008 	add_sta_cmd.station_flags_msk
3009 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3010 
3011 	status = IWM_ADD_STA_SUCCESS;
3012 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3013 	if (ret)
3014 		return ret;
3015 
3016 	switch (status) {
3017 	case IWM_ADD_STA_SUCCESS:
3018 		break;
3019 	default:
3020 		ret = EIO;
3021 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3022 		break;
3023 	}
3024 
3025 	return ret;
3026 }
3027 
/* Add (create) the station for this node in the firmware. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3033 
/* Modify an already-added station for this node in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3039 
3040 static int
3041 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3042 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3043 {
3044 	struct iwm_mvm_add_sta_cmd_v6 cmd;
3045 	int ret;
3046 	uint32_t status;
3047 
3048 	memset(&cmd, 0, sizeof(cmd));
3049 	cmd.sta_id = sta->sta_id;
3050 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3051 
3052 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3053 
3054 	if (addr)
3055 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3056 
3057 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3058 	if (ret)
3059 		return ret;
3060 
3061 	switch (status) {
3062 	case IWM_ADD_STA_SUCCESS:
3063 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3064 		    "%s: Internal station added.\n", __func__);
3065 		return 0;
3066 	default:
3067 		device_printf(sc->sc_dev,
3068 		    "%s: Add internal station failed, status=0x%x\n",
3069 		    __func__, status);
3070 		ret = EIO;
3071 		break;
3072 	}
3073 	return ret;
3074 }
3075 
/*
 * Add the auxiliary station used for scanning (see iwm_init_hw()).
 * On failure the cached sc_aux_sta state is cleared again so a retry
 * starts from scratch.
 */
static int
iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
	int ret;

	sc->sc_aux_sta.sta_id = 3;	/* XXX magic aux station id -- TODO: name this constant */
	sc->sc_aux_sta.tfd_queue_msk = 0;	/* no TX queues for the aux station */

	ret = iwm_mvm_add_int_sta_common(sc,
	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);

	if (ret)
		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
	return ret;
}
3091 
3092 /*
3093  * END mvm/sta.c
3094  */
3095 
3096 /*
3097  * BEGIN mvm/quota.c
3098  */
3099 
3100 static int
3101 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3102 {
3103 	struct iwm_time_quota_cmd cmd;
3104 	int i, idx, ret, num_active_macs, quota, quota_rem;
3105 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3106 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3107 	uint16_t id;
3108 
3109 	memset(&cmd, 0, sizeof(cmd));
3110 
3111 	/* currently, PHY ID == binding ID */
3112 	if (in) {
3113 		id = in->in_phyctxt->id;
3114 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3115 		colors[id] = in->in_phyctxt->color;
3116 
3117 		if (1)
3118 			n_ifs[id] = 1;
3119 	}
3120 
3121 	/*
3122 	 * The FW's scheduling session consists of
3123 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3124 	 * equally between all the bindings that require quota
3125 	 */
3126 	num_active_macs = 0;
3127 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3128 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3129 		num_active_macs += n_ifs[i];
3130 	}
3131 
3132 	quota = 0;
3133 	quota_rem = 0;
3134 	if (num_active_macs) {
3135 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3136 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3137 	}
3138 
3139 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3140 		if (colors[i] < 0)
3141 			continue;
3142 
3143 		cmd.quotas[idx].id_and_color =
3144 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3145 
3146 		if (n_ifs[i] <= 0) {
3147 			cmd.quotas[idx].quota = htole32(0);
3148 			cmd.quotas[idx].max_duration = htole32(0);
3149 		} else {
3150 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3151 			cmd.quotas[idx].max_duration = htole32(0);
3152 		}
3153 		idx++;
3154 	}
3155 
3156 	/* Give the remainder of the session to the first binding */
3157 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3158 
3159 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3160 	    sizeof(cmd), &cmd);
3161 	if (ret)
3162 		device_printf(sc->sc_dev,
3163 		    "%s: Failed to send quota: %d\n", __func__, ret);
3164 	return ret;
3165 }
3166 
3167 /*
3168  * END mvm/quota.c
3169  */
3170 
3171 /*
3172  * ieee80211 routines
3173  */
3174 
3175 /*
3176  * Change to AUTH state in 80211 state machine.  Roughly matches what
3177  * Linux does in bss_info_changed().
3178  */
3179 static int
3180 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3181 {
3182 	struct ieee80211_node *ni;
3183 	struct iwm_node *in;
3184 	struct iwm_vap *iv = IWM_VAP(vap);
3185 	uint32_t duration;
3186 	int error;
3187 
3188 	/*
3189 	 * XXX i have a feeling that the vap node is being
3190 	 * freed from underneath us. Grr.
3191 	 */
3192 	ni = ieee80211_ref_node(vap->iv_bss);
3193 	in = IWM_NODE(ni);
3194 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3195 	    "%s: called; vap=%p, bss ni=%p\n",
3196 	    __func__,
3197 	    vap,
3198 	    ni);
3199 
3200 	in->in_assoc = 0;
3201 
3202 	error = iwm_allow_mcast(vap, sc);
3203 	if (error) {
3204 		device_printf(sc->sc_dev,
3205 		    "%s: failed to set multicast\n", __func__);
3206 		goto out;
3207 	}
3208 
3209 	/*
3210 	 * This is where it deviates from what Linux does.
3211 	 *
3212 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3213 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3214 	 * and always does does a mac_ctx_changed().
3215 	 *
3216 	 * The openbsd port doesn't attempt to do that - it reset things
3217 	 * at odd states and does the add here.
3218 	 *
3219 	 * So, until the state handling is fixed (ie, we never reset
3220 	 * the NIC except for a firmware failure, which should drag
3221 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3222 	 * contexts that are required), let's do a dirty hack here.
3223 	 */
3224 	if (iv->is_uploaded) {
3225 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3226 			device_printf(sc->sc_dev,
3227 			    "%s: failed to update MAC\n", __func__);
3228 			goto out;
3229 		}
3230 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3231 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3232 			device_printf(sc->sc_dev,
3233 			    "%s: failed update phy ctxt\n", __func__);
3234 			goto out;
3235 		}
3236 		in->in_phyctxt = &sc->sc_phyctxt[0];
3237 
3238 		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3239 			device_printf(sc->sc_dev,
3240 			    "%s: binding update cmd\n", __func__);
3241 			goto out;
3242 		}
3243 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3244 			device_printf(sc->sc_dev,
3245 			    "%s: failed to update sta\n", __func__);
3246 			goto out;
3247 		}
3248 	} else {
3249 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3250 			device_printf(sc->sc_dev,
3251 			    "%s: failed to add MAC\n", __func__);
3252 			goto out;
3253 		}
3254 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3255 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3256 			device_printf(sc->sc_dev,
3257 			    "%s: failed add phy ctxt!\n", __func__);
3258 			error = ETIMEDOUT;
3259 			goto out;
3260 		}
3261 		in->in_phyctxt = &sc->sc_phyctxt[0];
3262 
3263 		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3264 			device_printf(sc->sc_dev,
3265 			    "%s: binding add cmd\n", __func__);
3266 			goto out;
3267 		}
3268 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3269 			device_printf(sc->sc_dev,
3270 			    "%s: failed to add sta\n", __func__);
3271 			goto out;
3272 		}
3273 	}
3274 
3275 	/*
3276 	 * Prevent the FW from wandering off channel during association
3277 	 * by "protecting" the session with a time event.
3278 	 */
3279 	/* XXX duration is in units of TU, not MS */
3280 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3281 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3282 	DELAY(100);
3283 
3284 	error = 0;
3285 out:
3286 	ieee80211_free_node(ni);
3287 	return (error);
3288 }
3289 
3290 static int
3291 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3292 {
3293 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
3294 	int error;
3295 
3296 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3297 		device_printf(sc->sc_dev,
3298 		    "%s: failed to update STA\n", __func__);
3299 		return error;
3300 	}
3301 
3302 	in->in_assoc = 1;
3303 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3304 		device_printf(sc->sc_dev,
3305 		    "%s: failed to update MAC\n", __func__);
3306 		return error;
3307 	}
3308 
3309 	return 0;
3310 }
3311 
/*
 * Tear down the RUN-state association.  Rather than unwinding the
 * firmware state step by step (which hangs the device -- see below),
 * this simply resets and re-initializes the whole NIC.  Always
 * returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Dead code: the step-by-step teardown described above. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): duplicate rm_sta call below -- drop it if this
	 * path is ever re-enabled. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3369 
/*
 * net80211 node-allocation callback: return a zeroed driver-private
 * iwm_node (which wraps the ieee80211_node as 'in_ni'), or NULL if
 * the M_NOWAIT allocation fails.
 */
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return malloc(sizeof (struct iwm_node), M_80211_NODE,
	    M_NOWAIT | M_ZERO);
}
3376 
/*
 * Build the firmware link-quality (rate selection) command in
 * in->in_lq from the node's negotiated legacy rate set.  The rates
 * are mapped to hardware rate indices (highest rate first) and the
 * TX antennas are rotated across table entries.  Not 11n-aware.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No match: in_ridx[i] stays -1 from the memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, one per entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	/* ('tab' still holds the last, i.e. lowest, entry from above) */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3488 
3489 static int
3490 iwm_media_change(struct ifnet *ifp)
3491 {
3492 	struct ieee80211vap *vap = ifp->if_softc;
3493 	struct ieee80211com *ic = vap->iv_ic;
3494 	struct iwm_softc *sc = ic->ic_softc;
3495 	int error;
3496 
3497 	error = ieee80211_media_change(ifp);
3498 	if (error != ENETRESET)
3499 		return error;
3500 
3501 	IWM_LOCK(sc);
3502 	if (ic->ic_nrunning > 0) {
3503 		iwm_stop(sc);
3504 		iwm_init(sc);
3505 	}
3506 	IWM_UNLOCK(sc);
3507 	return error;
3508 }
3509 
3510 
/*
 * net80211 state-change handler.  Called with the com lock held; we
 * drop it and take the driver lock while programming the firmware,
 * then chain to the stacked iv_newstate().
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Re-take the com lock to recurse into iv_newstate. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		/* NB: sizeof() is unevaluated, so using 'in' here before
		 * it is assigned below is safe. */
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the rate table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
3633 
/*
 * Scan-end taskqueue callback.  If the pass that just ended covered
 * 2GHz and the NVM advertises 5GHz support, kick off a 5GHz pass;
 * otherwise report scan completion to net80211.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	IWM_LOCK(sc);
	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
		done = 0;
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
			/* Could not continue on 5GHz; finish the scan. */
			device_printf(sc->sc_dev,
			    "could not initiate 5 GHz scan\n");
			done = 1;
		}
	} else {
		done = 1;
	}

	if (done) {
		/* Call into net80211 without the driver lock held. */
		IWM_UNLOCK(sc);
		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
		IWM_LOCK(sc);
		sc->sc_scanband = 0;
	}
	IWM_UNLOCK(sc);
}
3668 
/*
 * Bring the NIC fully up: run the INIT firmware image, restart the
 * hardware, load the regular firmware, then push the initial
 * configuration (TX antennas, PHY db, PHY config, aux station, PHY
 * contexts, power) and activate the first four TX queues.  On any
 * failure after firmware load the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	/* Run the INIT image (calibration etc.). */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
3739 
/*
 * Allow multicast from our BSSID: send a MCAST_FILTER command that
 * passes all frames (pass_all = 1, empty address list) with the
 * current BSSID.  Returns ENOMEM on allocation failure or the
 * command transport error.
 */
static int
iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwm_mcast_filter_cmd *cmd;
	size_t size;
	int error;

	/* Command size rounded up to a 4-byte multiple -- presumably a
	 * firmware alignment requirement; TODO confirm. */
	size = roundup(sizeof(*cmd), 4);
	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cmd == NULL)
		return ENOMEM;
	cmd->filter_own = 1;
	cmd->port_id = 0;
	cmd->count = 0;		/* no explicit multicast address list */
	cmd->pass_all = 1;	/* ... just pass everything */
	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);

	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
	    IWM_CMD_SYNC, size, cmd);
	free(cmd, M_DEVBUF);

	return (error);
}
3765 
3766 static void
3767 iwm_init(struct iwm_softc *sc)
3768 {
3769 	int error;
3770 
3771 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3772 		return;
3773 	}
3774 	sc->sc_generation++;
3775 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
3776 
3777 	if ((error = iwm_init_hw(sc)) != 0) {
3778 		iwm_stop(sc);
3779 		return;
3780 	}
3781 
3782 	/*
3783  	 * Ok, firmware loaded and we are jogging
3784 	 */
3785 	sc->sc_flags |= IWM_FLAG_HW_INITED;
3786 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3787 }
3788 
3789 static int
3790 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3791 {
3792 	struct iwm_softc *sc;
3793 	int error;
3794 
3795 	sc = ic->ic_softc;
3796 
3797 	IWM_LOCK(sc);
3798 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3799 		IWM_UNLOCK(sc);
3800 		return (ENXIO);
3801 	}
3802 	error = mbufq_enqueue(&sc->sc_snd, m);
3803 	if (error) {
3804 		IWM_UNLOCK(sc);
3805 		return (error);
3806 	}
3807 	iwm_start(sc);
3808 	IWM_UNLOCK(sc);
3809 	return (0);
3810 }
3811 
3812 /*
3813  * Dequeue packets from sendq and call send.
3814  */
3815 static void
3816 iwm_start(struct iwm_softc *sc)
3817 {
3818 	struct ieee80211_node *ni;
3819 	struct mbuf *m;
3820 	int ac = 0;
3821 
3822 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3823 	while (sc->qfullmsk == 0 &&
3824 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3825 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3826 		if (iwm_tx(sc, m, ni, ac) != 0) {
3827 			if_inc_counter(ni->ni_vap->iv_ifp,
3828 			    IFCOUNTER_OERRORS, 1);
3829 			ieee80211_free_node(ni);
3830 			continue;
3831 		}
3832 		sc->sc_tx_timer = 15;
3833 	}
3834 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
3835 }
3836 
/*
 * Stop the device: clear the running flag, bump the generation
 * counter (as iwm_init() does on start), disarm the TX watchdog and
 * shut the hardware down.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	sc->sc_scanband = 0;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;	/* disarm the TX watchdog */
	iwm_stop_device(sc);
}
3849 
3850 static void
3851 iwm_watchdog(void *arg)
3852 {
3853 	struct iwm_softc *sc = arg;
3854 	struct ieee80211com *ic = &sc->sc_ic;
3855 
3856 	if (sc->sc_tx_timer > 0) {
3857 		if (--sc->sc_tx_timer == 0) {
3858 			device_printf(sc->sc_dev, "device timeout\n");
3859 #ifdef IWM_DEBUG
3860 			iwm_nic_error(sc);
3861 #endif
3862 			ieee80211_restart_all(ic);
3863 			counter_u64_add(ic->ic_oerrors, 1);
3864 			return;
3865 		}
3866 	}
3867 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3868 }
3869 
3870 static void
3871 iwm_parent(struct ieee80211com *ic)
3872 {
3873 	struct iwm_softc *sc = ic->ic_softc;
3874 	int startall = 0;
3875 
3876 	IWM_LOCK(sc);
3877 	if (ic->ic_nrunning > 0) {
3878 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
3879 			iwm_init(sc);
3880 			startall = 1;
3881 		}
3882 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
3883 		iwm_stop(sc);
3884 	IWM_UNLOCK(sc);
3885 	if (startall)
3886 		ieee80211_start_all(ic);
3887 }
3888 
3889 /*
3890  * The interrupt side of things
3891  */
3892 
3893 /*
3894  * error dumping routines are from iwlwifi/mvm/utils.c
3895  */
3896 
3897 /*
3898  * Note: This structure is read from the device with IO accesses,
3899  * and the reading already does the endian conversion. As it is
3900  * read with uint32_t-sized accesses, any members with a different size
3901  * need to be ordered correctly though!
3902  */
/* Field order and sizes must stay in sync with the firmware's error
 * log layout (see the access-size note above). */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
3949 
/*
 * Firmware error-log layout constants (inherited from iwlwifi):
 * the first 32-bit word is the "valid"/count field, followed by
 * fixed-size entries.  ERROR_START_OFFSET is used as a sanity bound
 * in iwm_nic_error() below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
3952 
3953 #ifdef IWM_DEBUG
3954 struct {
3955 	const char *name;
3956 	uint8_t num;
3957 } advanced_lookup[] = {
3958 	{ "NMI_INTERRUPT_WDG", 0x34 },
3959 	{ "SYSASSERT", 0x35 },
3960 	{ "UCODE_VERSION_MISMATCH", 0x37 },
3961 	{ "BAD_COMMAND", 0x38 },
3962 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
3963 	{ "FATAL_ERROR", 0x3D },
3964 	{ "NMI_TRM_HW_ERR", 0x46 },
3965 	{ "NMI_INTERRUPT_TRM", 0x4C },
3966 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
3967 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
3968 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
3969 	{ "NMI_INTERRUPT_HOST", 0x66 },
3970 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
3971 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
3972 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
3973 	{ "ADVANCED_SYSASSERT", 0 },
3974 };
3975 
3976 static const char *
3977 iwm_desc_lookup(uint32_t num)
3978 {
3979 	int i;
3980 
3981 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
3982 		if (advanced_lookup[i].num == num)
3983 			return advanced_lookup[i].name;
3984 
3985 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
3986 	return advanced_lookup[i].name;
3987 }
3988 
3989 /*
3990  * Support for dumping the error log seemed like a good idea ...
3991  * but it's mostly hex junk and the only sensible thing is the
3992  * hw/ucode revision (which we know anyway).  Since it's here,
3993  * I'll just leave it in, just in case e.g. the Intel guys want to
3994  * help us decipher some "ADVANCED_SYSASSERT" later.
3995  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check the firmware-provided pointer against the
	 * window [0x800000, 0x80C000) before touching device memory. */
	if (base < 0x800000 || base >= 0x80C000) {
		device_printf(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* NB: iwm_read_mem() takes its length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * NOTE(review): "valid" is also used as an entry count here,
	 * presumably mirroring iwlwifi's log format — confirm against
	 * the firmware documentation.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* Decode the error ID; the remaining lines are a raw dump of
	 * the firmware's saved registers and state. */
	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
	device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4061 #endif
4062 
/*
 * Sync the RX buffer's DMA map for CPU reads and point _var_ at the
 * payload that immediately follows the iwm_rx_packet header.
 * NB: relies on local variables "ring" and "data" in the caller's scope.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Same as SYNC_RESP_STRUCT; the _len_ argument is accepted but
 * currently unused.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4076 
4077 /*
4078  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4079  * Basic structure from if_iwn
4080  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* "hw" is how far the firmware has filled the RX ring. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 marks firmware-originated packets; mask it off
		 * (see the comment at the bottom of the loop). */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Dispatch on the notification/response opcode. */
		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MVM_ALIVE: {
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			/* Record the firmware-supplied table pointers, then
			 * wake whoever is sleeping on &sc->sc_uc. */
			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/* Copy the response out for the waiting command
			 * issuer (matched via sc_wantresp). */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* Finish the scan from the taskqueue, not the ISR. */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			        notif->status, notif->action);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4310 
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* NOTE(review): rv is assigned below but never consumed. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* NB: htole32 here is equivalent to le32toh (both are a
		 * byte swap on BE hosts, identity on LE). */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the interrupts we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		/* NB: returns without re-enabling interrupts; the restart
		 * path reinitializes the device. */
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX ring (notifications and frames). */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
4465 
4466 /*
4467  * Autoconf glue-sniffing
4468  */
/* PCI vendor/device IDs recognized by this driver. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b

/* Device-ID -> marketing-name table consulted by iwm_probe(). */
static const struct iwm_devices {
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* description set on the device */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
4488 
4489 static int
4490 iwm_probe(device_t dev)
4491 {
4492 	int i;
4493 
4494 	for (i = 0; i < nitems(iwm_devices); i++)
4495 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4496 		    pci_get_device(dev) == iwm_devices[i].device) {
4497 			device_set_desc(dev, iwm_devices[i].name);
4498 			return (BUS_PROBE_DEFAULT);
4499 		}
4500 
4501 	return (ENXIO);
4502 }
4503 
4504 static int
4505 iwm_dev_check(device_t dev)
4506 {
4507 	struct iwm_softc *sc;
4508 
4509 	sc = device_get_softc(dev);
4510 
4511 	switch (pci_get_device(dev)) {
4512 	case PCI_PRODUCT_INTEL_WL_3160_1:
4513 	case PCI_PRODUCT_INTEL_WL_3160_2:
4514 		sc->sc_fwname = "iwm3160fw";
4515 		sc->host_interrupt_operation_mode = 1;
4516 		return (0);
4517 	case PCI_PRODUCT_INTEL_WL_7260_1:
4518 	case PCI_PRODUCT_INTEL_WL_7260_2:
4519 		sc->sc_fwname = "iwm7260fw";
4520 		sc->host_interrupt_operation_mode = 1;
4521 		return (0);
4522 	case PCI_PRODUCT_INTEL_WL_7265_1:
4523 	case PCI_PRODUCT_INTEL_WL_7265_2:
4524 		sc->sc_fwname = "iwm7265fw";
4525 		sc->host_interrupt_operation_mode = 0;
4526 		return (0);
4527 	default:
4528 		device_printf(dev, "unknown adapter type\n");
4529 		return ENXIO;
4530 	}
4531 }
4532 
4533 static int
4534 iwm_pci_attach(device_t dev)
4535 {
4536 	struct iwm_softc *sc;
4537 	int count, error, rid;
4538 	uint16_t reg;
4539 
4540 	sc = device_get_softc(dev);
4541 
4542 	/* Clear device-specific "PCI retry timeout" register (41h). */
4543 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4544 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4545 
4546 	/* Enable bus-mastering and hardware bug workaround. */
4547 	pci_enable_busmaster(dev);
4548 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4549 	/* if !MSI */
4550 	if (reg & PCIM_STATUS_INTxSTATE) {
4551 		reg &= ~PCIM_STATUS_INTxSTATE;
4552 	}
4553 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4554 
4555 	rid = PCIR_BAR(0);
4556 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4557 	    RF_ACTIVE);
4558 	if (sc->sc_mem == NULL) {
4559 		device_printf(sc->sc_dev, "can't map mem space\n");
4560 		return (ENXIO);
4561 	}
4562 	sc->sc_st = rman_get_bustag(sc->sc_mem);
4563 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4564 
4565 	/* Install interrupt handler. */
4566 	count = 1;
4567 	rid = 0;
4568 	if (pci_alloc_msi(dev, &count) == 0)
4569 		rid = 1;
4570 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4571 	    (rid != 0 ? 0 : RF_SHAREABLE));
4572 	if (sc->sc_irq == NULL) {
4573 		device_printf(dev, "can't map interrupt\n");
4574 			return (ENXIO);
4575 	}
4576 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4577 	    NULL, iwm_intr, sc, &sc->sc_ih);
4578 	if (sc->sc_ih == NULL) {
4579 		device_printf(dev, "can't establish interrupt");
4580 			return (ENXIO);
4581 	}
4582 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
4583 
4584 	return (0);
4585 }
4586 
4587 static void
4588 iwm_pci_detach(device_t dev)
4589 {
4590 	struct iwm_softc *sc = device_get_softc(dev);
4591 
4592 	if (sc->sc_irq != NULL) {
4593 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4594 		bus_release_resource(dev, SYS_RES_IRQ,
4595 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
4596 		pci_release_msi(dev);
4597         }
4598 	if (sc->sc_mem != NULL)
4599 		bus_release_resource(dev, SYS_RES_MEMORY,
4600 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
4601 }
4602 
4603 
4604 
4605 static int
4606 iwm_attach(device_t dev)
4607 {
4608 	struct iwm_softc *sc = device_get_softc(dev);
4609 	struct ieee80211com *ic = &sc->sc_ic;
4610 	int error;
4611 	int txq_i, i;
4612 
4613 	sc->sc_dev = dev;
4614 	IWM_LOCK_INIT(sc);
4615 	mbufq_init(&sc->sc_snd, ifqmaxlen);
4616 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
4617 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
4618 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
4619 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
4620             taskqueue_thread_enqueue, &sc->sc_tq);
4621         error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
4622         if (error != 0) {
4623                 device_printf(dev, "can't start threads, error %d\n",
4624 		    error);
4625 		goto fail;
4626         }
4627 
4628 	/* PCI attach */
4629 	error = iwm_pci_attach(dev);
4630 	if (error != 0)
4631 		goto fail;
4632 
4633 	sc->sc_wantresp = -1;
4634 
4635 	/* Check device type */
4636 	error = iwm_dev_check(dev);
4637 	if (error != 0)
4638 		goto fail;
4639 
4640 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
4641 
4642 	/*
4643 	 * We now start fiddling with the hardware
4644 	 */
4645 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
4646 	if (iwm_prepare_card_hw(sc) != 0) {
4647 		device_printf(dev, "could not initialize hardware\n");
4648 		goto fail;
4649 	}
4650 
4651 	/* Allocate DMA memory for firmware transfers. */
4652 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
4653 		device_printf(dev, "could not allocate memory for firmware\n");
4654 		goto fail;
4655 	}
4656 
4657 	/* Allocate "Keep Warm" page. */
4658 	if ((error = iwm_alloc_kw(sc)) != 0) {
4659 		device_printf(dev, "could not allocate keep warm page\n");
4660 		goto fail;
4661 	}
4662 
4663 	/* We use ICT interrupts */
4664 	if ((error = iwm_alloc_ict(sc)) != 0) {
4665 		device_printf(dev, "could not allocate ICT table\n");
4666 		goto fail;
4667 	}
4668 
4669 	/* Allocate TX scheduler "rings". */
4670 	if ((error = iwm_alloc_sched(sc)) != 0) {
4671 		device_printf(dev, "could not allocate TX scheduler rings\n");
4672 		goto fail;
4673 	}
4674 
4675 	/* Allocate TX rings */
4676 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
4677 		if ((error = iwm_alloc_tx_ring(sc,
4678 		    &sc->txq[txq_i], txq_i)) != 0) {
4679 			device_printf(dev,
4680 			    "could not allocate TX ring %d\n",
4681 			    txq_i);
4682 			goto fail;
4683 		}
4684 	}
4685 
4686 	/* Allocate RX ring. */
4687 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
4688 		device_printf(dev, "could not allocate RX ring\n");
4689 		goto fail;
4690 	}
4691 
4692 	/* Clear pending interrupts. */
4693 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
4694 
4695 	ic->ic_softc = sc;
4696 	ic->ic_name = device_get_nameunit(sc->sc_dev);
4697 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
4698 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
4699 
4700 	/* Set device capabilities. */
4701 	ic->ic_caps =
4702 	    IEEE80211_C_STA |
4703 	    IEEE80211_C_WPA |		/* WPA/RSN */
4704 	    IEEE80211_C_WME |
4705 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
4706 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
4707 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
4708 	    ;
4709 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
4710 		sc->sc_phyctxt[i].id = i;
4711 		sc->sc_phyctxt[i].color = 0;
4712 		sc->sc_phyctxt[i].ref = 0;
4713 		sc->sc_phyctxt[i].channel = NULL;
4714 	}
4715 
4716 	/* Max RSSI */
4717 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
4718 	sc->sc_preinit_hook.ich_func = iwm_preinit;
4719 	sc->sc_preinit_hook.ich_arg = sc;
4720 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
4721 		device_printf(dev, "config_intrhook_establish failed\n");
4722 		goto fail;
4723 	}
4724 
4725 #ifdef IWM_DEBUG
4726 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4727 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
4728 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
4729 #endif
4730 
4731 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4732 	    "<-%s\n", __func__);
4733 
4734 	return 0;
4735 
4736 	/* Free allocated memory if something failed during attachment. */
4737 fail:
4738 	iwm_detach_local(sc, 0);
4739 
4740 	return ENXIO;
4741 }
4742 
4743 static int
4744 iwm_update_edca(struct ieee80211com *ic)
4745 {
4746 	struct iwm_softc *sc = ic->ic_softc;
4747 
4748 	device_printf(sc->sc_dev, "%s: called\n", __func__);
4749 	return (0);
4750 }
4751 
/*
 * Deferred half of attach, run from a config intrhook: bring the
 * hardware up once, run the init firmware to read NVM/calibration
 * data, then attach to net80211.  On failure the whole driver state
 * is torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once, then stop the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Override net80211 methods with driver implementations. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
4824 
4825 /*
4826  * Attach the interface to 802.11 radiotap.
4827  */
4828 static void
4829 iwm_radiotap_attach(struct iwm_softc *sc)
4830 {
4831         struct ieee80211com *ic = &sc->sc_ic;
4832 
4833 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4834 	    "->%s begin\n", __func__);
4835         ieee80211_radiotap_attach(ic,
4836             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
4837                 IWM_TX_RADIOTAP_PRESENT,
4838             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
4839                 IWM_RX_RADIOTAP_PRESENT);
4840 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4841 	    "->%s end\n", __func__);
4842 }
4843 
4844 static struct ieee80211vap *
4845 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
4846     enum ieee80211_opmode opmode, int flags,
4847     const uint8_t bssid[IEEE80211_ADDR_LEN],
4848     const uint8_t mac[IEEE80211_ADDR_LEN])
4849 {
4850 	struct iwm_vap *ivp;
4851 	struct ieee80211vap *vap;
4852 
4853 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
4854 		return NULL;
4855 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
4856 	vap = &ivp->iv_vap;
4857 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
4858 	vap->iv_bmissthreshold = 10;            /* override default */
4859 	/* Override with driver methods. */
4860 	ivp->iv_newstate = vap->iv_newstate;
4861 	vap->iv_newstate = iwm_newstate;
4862 
4863 	ieee80211_ratectl_init(vap);
4864 	/* Complete setup. */
4865 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
4866 	    mac);
4867 	ic->ic_opmode = opmode;
4868 
4869 	return vap;
4870 }
4871 
4872 static void
4873 iwm_vap_delete(struct ieee80211vap *vap)
4874 {
4875 	struct iwm_vap *ivp = IWM_VAP(vap);
4876 
4877 	ieee80211_ratectl_deinit(vap);
4878 	ieee80211_vap_detach(vap);
4879 	free(ivp, M_80211_VAP);
4880 }
4881 
4882 static void
4883 iwm_scan_start(struct ieee80211com *ic)
4884 {
4885 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4886         struct iwm_softc *sc = ic->ic_softc;
4887 	int error;
4888 
4889 	if (sc->sc_scanband)
4890 		return;
4891 	IWM_LOCK(sc);
4892 	error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
4893 	if (error) {
4894 		device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
4895 		IWM_UNLOCK(sc);
4896 		ieee80211_cancel_scan(vap);
4897 		sc->sc_scanband = 0;
4898 	} else {
4899 		iwm_led_blink_start(sc);
4900 		IWM_UNLOCK(sc);
4901 	}
4902 }
4903 
4904 static void
4905 iwm_scan_end(struct ieee80211com *ic)
4906 {
4907 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4908 	struct iwm_softc *sc = ic->ic_softc;
4909 
4910 	IWM_LOCK(sc);
4911 	iwm_led_blink_stop(sc);
4912 	if (vap->iv_state == IEEE80211_S_RUN)
4913 		iwm_mvm_led_enable(sc);
4914 	IWM_UNLOCK(sc);
4915 }
4916 
static void
iwm_update_mcast(struct ieee80211com *ic)
{
	/* Multicast filter reprogramming is intentionally a no-op. */
}
4921 
static void
iwm_set_channel(struct ieee80211com *ic)
{
	/* Intentionally empty; channel programming happens via firmware
	 * commands in the scan/association paths. */
}
4926 
/*
 * net80211 ic_scan_curchan method.  Intentionally empty: the firmware
 * scan request issued from iwm_scan_start() drives the channel dwell,
 * so per-channel callbacks have nothing to do.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
4931 
/*
 * net80211 ic_scan_mindwell method.  Intentionally empty (the bare
 * "return" the stub used to carry was redundant in a void function).
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
4937 
/*
 * Restart the interface: stop the hardware and bring it back up if any
 * interface is still marked running.  Used as the common reinit path
 * (e.g. from iwm_resume()).
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/*
	 * Serialize with any other init/stop path: sleep until nobody
	 * else holds IWM_FLAG_BUSY, then claim it ourselves.  The wakeup
	 * channel is the flags word itself.
	 */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	/* Always stop first; only re-init if an interface is still up. */
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	/* Release the busy flag and wake any waiters blocked above. */
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
4954 
4955 static int
4956 iwm_resume(device_t dev)
4957 {
4958 	struct iwm_softc *sc = device_get_softc(dev);
4959 	int do_reinit = 0;
4960 	uint16_t reg;
4961 
4962 	/* Clear device-specific "PCI retry timeout" register (41h). */
4963 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4964 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4965 	iwm_init_task(device_get_softc(dev));
4966 
4967 	IWM_LOCK(sc);
4968 	if (sc->sc_flags & IWM_FLAG_DORESUME) {
4969 		sc->sc_flags &= ~IWM_FLAG_DORESUME;
4970 		do_reinit = 1;
4971 	}
4972 	IWM_UNLOCK(sc);
4973 
4974 	if (do_reinit)
4975 		ieee80211_resume_all(&sc->sc_ic);
4976 
4977 	return 0;
4978 }
4979 
4980 static int
4981 iwm_suspend(device_t dev)
4982 {
4983 	int do_stop = 0;
4984 	struct iwm_softc *sc = device_get_softc(dev);
4985 
4986 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
4987 
4988 	ieee80211_suspend_all(&sc->sc_ic);
4989 
4990 	if (do_stop) {
4991 		IWM_LOCK(sc);
4992 		iwm_stop(sc);
4993 		sc->sc_flags |= IWM_FLAG_DORESUME;
4994 		IWM_UNLOCK(sc);
4995 	}
4996 
4997 	return (0);
4998 }
4999 
/*
 * Common teardown: release the taskqueue, timers, hardware, DMA
 * resources, firmware image and PCI resources.  The do_net80211 flag
 * controls whether ieee80211_ifdetach() is called — presumably so the
 * same path can serve attach-failure cleanup before net80211 is
 * attached; confirm against callers.  Ordering here matters: all
 * deferred work is drained before the hardware is stopped, and the
 * lock is destroyed last.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Drain and free the taskqueue so no deferred work runs on
	 * freed state below. */
	if (sc->sc_tq) {
		taskqueue_drain_all(sc->sc_tq);
		taskqueue_free(sc->sc_tq);
	}
	/* Wait for any in-flight callout handlers to finish. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211)
		ieee80211_ifdetach(&sc->sc_ic);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_free_sched(sc);
	/* The DMA areas below may not all have been allocated (e.g. on
	 * attach failure), so each is guarded by a vaddr check. */
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
5043 
5044 static int
5045 iwm_detach(device_t dev)
5046 {
5047 	struct iwm_softc *sc = device_get_softc(dev);
5048 
5049 	return (iwm_detach_local(sc, 1));
5050 }
5051 
5052 static device_method_t iwm_pci_methods[] = {
5053         /* Device interface */
5054         DEVMETHOD(device_probe,         iwm_probe),
5055         DEVMETHOD(device_attach,        iwm_attach),
5056         DEVMETHOD(device_detach,        iwm_detach),
5057         DEVMETHOD(device_suspend,       iwm_suspend),
5058         DEVMETHOD(device_resume,        iwm_resume),
5059 
5060         DEVMETHOD_END
5061 };
5062 
5063 static driver_t iwm_pci_driver = {
5064         "iwm",
5065         iwm_pci_methods,
5066         sizeof (struct iwm_softc)
5067 };
5068 
static devclass_t iwm_devclass;

/* Register the driver on the pci bus and declare its module
 * dependencies so the loader pulls them in first. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
5075