xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 282e23f07bf49b4e37aabdcc1c513a788db36d10)
1 /*	$OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126 
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130 
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133 
134 #include <net/bpf.h>
135 
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142 
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147 
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152 
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164 
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167 
/*
 * Channel numbers the NVM can enable, in NVM bitmap order: the first
 * IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels, the remainder
 * are 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Number of 2.4 GHz entries at the start of iwm_nvm_channels[]. */
#define IWM_NUM_2GHZ_CHANNELS	14

_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
180 
181 /*
182  * XXX For now, there's simply a fixed set of rate table entries
183  * that are populated.
184  */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbit/s units (e.g. 2 == 1 Mbit/s) */
	uint8_t plcp;	/* IWM_RATE_*_PLCP code handed to the firmware */
} iwm_rates[] = {
	/* CCK rates (indices IWM_RIDX_CCK .. IWM_RIDX_OFDM-1) */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* OFDM rates (indices IWM_RIDX_OFDM and up) */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index of the first CCK entry in iwm_rates[]. */
#define IWM_RIDX_CCK	0
/* Index of the first OFDM entry in iwm_rates[]. */
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
207 
208 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
209 static int	iwm_firmware_store_section(struct iwm_softc *,
210                                            enum iwm_ucode_type,
211                                            const uint8_t *, size_t);
212 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
213 static void	iwm_fw_info_free(struct iwm_fw_info *);
214 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
215 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
216 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
217                                      bus_size_t, bus_size_t);
218 static void	iwm_dma_contig_free(struct iwm_dma_info *);
219 static int	iwm_alloc_fwmem(struct iwm_softc *);
220 static void	iwm_free_fwmem(struct iwm_softc *);
221 static int	iwm_alloc_sched(struct iwm_softc *);
222 static void	iwm_free_sched(struct iwm_softc *);
223 static int	iwm_alloc_kw(struct iwm_softc *);
224 static void	iwm_free_kw(struct iwm_softc *);
225 static int	iwm_alloc_ict(struct iwm_softc *);
226 static void	iwm_free_ict(struct iwm_softc *);
227 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
228 static void	iwm_disable_rx_dma(struct iwm_softc *);
229 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
230 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
231 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
232                                   int);
233 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
234 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
235 static void	iwm_enable_interrupts(struct iwm_softc *);
236 static void	iwm_restore_interrupts(struct iwm_softc *);
237 static void	iwm_disable_interrupts(struct iwm_softc *);
238 static void	iwm_ict_reset(struct iwm_softc *);
239 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
240 static void	iwm_stop_device(struct iwm_softc *);
241 static void	iwm_mvm_nic_config(struct iwm_softc *);
242 static int	iwm_nic_rx_init(struct iwm_softc *);
243 static int	iwm_nic_tx_init(struct iwm_softc *);
244 static int	iwm_nic_init(struct iwm_softc *);
245 static void	iwm_enable_txq(struct iwm_softc *, int, int);
246 static int	iwm_post_alive(struct iwm_softc *);
247 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
248                                    uint16_t, uint8_t *, uint16_t *);
249 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
250 				     uint16_t *);
251 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
252 static void	iwm_add_channel_band(struct iwm_softc *,
253 		    struct ieee80211_channel[], int, int *, int, int,
254 		    const uint8_t[]);
255 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
256 		    struct ieee80211_channel[]);
257 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
258 			           const uint16_t *, const uint16_t *, uint8_t,
259 				   uint8_t);
260 struct iwm_nvm_section;
261 static int	iwm_parse_nvm_sections(struct iwm_softc *,
262                                        struct iwm_nvm_section *);
263 static int	iwm_nvm_init(struct iwm_softc *);
264 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
265                                         const uint8_t *, uint32_t);
266 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
267 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
268 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
269 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
270 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
271                                               enum iwm_ucode_type);
272 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
273 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
274 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
275 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
276 					    struct iwm_rx_phy_info *);
277 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
278                                       struct iwm_rx_packet *,
279                                       struct iwm_rx_data *);
280 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
281 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
282                                    struct iwm_rx_data *);
283 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
284                                          struct iwm_rx_packet *,
285 				         struct iwm_node *);
286 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
287                                   struct iwm_rx_data *);
288 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
289 #if 0
290 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
291                                  uint16_t);
292 #endif
293 static const struct iwm_rate *
294 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
295 			struct ieee80211_frame *, struct iwm_tx_cmd *);
296 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
297                        struct ieee80211_node *, int);
298 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
299 			     const struct ieee80211_bpf_params *);
300 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
301 					     struct iwm_mvm_add_sta_cmd_v5 *);
302 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
303 					        struct iwm_mvm_add_sta_cmd_v6 *,
304                                                 int *);
305 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
306                                        int);
307 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
308 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
309 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
310                                            struct iwm_int_sta *,
311 				           const uint8_t *, uint16_t, uint16_t);
312 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
313 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
314 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
315 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
316 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
317 static struct ieee80211_node *
318 		iwm_node_alloc(struct ieee80211vap *,
319 		               const uint8_t[IEEE80211_ADDR_LEN]);
320 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
321 static int	iwm_media_change(struct ifnet *);
322 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
323 static void	iwm_endscan_cb(void *, int);
324 static int	iwm_init_hw(struct iwm_softc *);
325 static void	iwm_init(struct iwm_softc *);
326 static void	iwm_start(struct iwm_softc *);
327 static void	iwm_stop(struct iwm_softc *);
328 static void	iwm_watchdog(void *);
329 static void	iwm_parent(struct ieee80211com *);
330 #ifdef IWM_DEBUG
331 static const char *
332 		iwm_desc_lookup(uint32_t);
333 static void	iwm_nic_error(struct iwm_softc *);
334 #endif
335 static void	iwm_notif_intr(struct iwm_softc *);
336 static void	iwm_intr(void *);
337 static int	iwm_attach(device_t);
338 static void	iwm_preinit(void *);
339 static int	iwm_detach_local(struct iwm_softc *sc, int);
340 static void	iwm_init_task(void *);
341 static void	iwm_radiotap_attach(struct iwm_softc *);
342 static struct ieee80211vap *
343 		iwm_vap_create(struct ieee80211com *,
344 		               const char [IFNAMSIZ], int,
345 		               enum ieee80211_opmode, int,
346 		               const uint8_t [IEEE80211_ADDR_LEN],
347 		               const uint8_t [IEEE80211_ADDR_LEN]);
348 static void	iwm_vap_delete(struct ieee80211vap *);
349 static void	iwm_scan_start(struct ieee80211com *);
350 static void	iwm_scan_end(struct ieee80211com *);
351 static void	iwm_update_mcast(struct ieee80211com *);
352 static void	iwm_set_channel(struct ieee80211com *);
353 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
354 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
355 static int	iwm_detach(device_t);
356 
357 /*
358  * Firmware parser.
359  */
360 
361 static int
362 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
363 {
364 	const struct iwm_fw_cscheme_list *l = (const void *)data;
365 
366 	if (dlen < sizeof(*l) ||
367 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
368 		return EINVAL;
369 
370 	/* we don't actually store anything for now, always use s/w crypto */
371 
372 	return 0;
373 }
374 
375 static int
376 iwm_firmware_store_section(struct iwm_softc *sc,
377     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
378 {
379 	struct iwm_fw_sects *fws;
380 	struct iwm_fw_onesect *fwone;
381 
382 	if (type >= IWM_UCODE_TYPE_MAX)
383 		return EINVAL;
384 	if (dlen < sizeof(uint32_t))
385 		return EINVAL;
386 
387 	fws = &sc->sc_fw.fw_sects[type];
388 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
389 		return EINVAL;
390 
391 	fwone = &fws->fw_sect[fws->fw_count];
392 
393 	/* first 32bit are device load offset */
394 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
395 
396 	/* rest is data */
397 	fwone->fws_data = data + sizeof(uint32_t);
398 	fwone->fws_len = dlen - sizeof(uint32_t);
399 
400 	fws->fw_count++;
401 	fws->fw_totlen += fwone->fws_len;
402 
403 	return 0;
404 }
405 
/* iwlwifi: iwl-drv.c */
/*
 * Payload layout of the IWM_UCODE_TLV_DEF_CALIB TLV: the ucode image
 * the default calibration applies to, plus its calibration control
 * words (flow/event triggers, see iwm_set_default_calib()).
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* little-endian on the wire; an iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
411 
412 static int
413 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
414 {
415 	const struct iwm_tlv_calib_data *def_calib = data;
416 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
417 
418 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
419 		device_printf(sc->sc_dev,
420 		    "Wrong ucode_type %u for default "
421 		    "calibration.\n", ucode_type);
422 		return EINVAL;
423 	}
424 
425 	sc->sc_default_calib[ucode_type].flow_trigger =
426 	    def_calib->calib.flow_trigger;
427 	sc->sc_default_calib[ucode_type].event_trigger =
428 	    def_calib->calib.event_trigger;
429 
430 	return 0;
431 }
432 
433 static void
434 iwm_fw_info_free(struct iwm_fw_info *fw)
435 {
436 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
437 	fw->fw_fp = NULL;
438 	/* don't touch fw->fw_status */
439 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
440 }
441 
442 static int
443 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
444 {
445 	struct iwm_fw_info *fw = &sc->sc_fw;
446 	const struct iwm_tlv_ucode_header *uhdr;
447 	struct iwm_ucode_tlv tlv;
448 	enum iwm_ucode_tlv_type tlv_type;
449 	const struct firmware *fwp;
450 	const uint8_t *data;
451 	int error = 0;
452 	size_t len;
453 
454 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
455 	    ucode_type != IWM_UCODE_TYPE_INIT)
456 		return 0;
457 
458 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
459 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
460 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
461 
462 	if (fw->fw_fp != NULL)
463 		iwm_fw_info_free(fw);
464 
465 	/*
466 	 * Load firmware into driver memory.
467 	 * fw_fp will be set.
468 	 */
469 	IWM_UNLOCK(sc);
470 	fwp = firmware_get(sc->sc_fwname);
471 	IWM_LOCK(sc);
472 	if (fwp == NULL) {
473 		device_printf(sc->sc_dev,
474 		    "could not read firmware %s (error %d)\n",
475 		    sc->sc_fwname, error);
476 		goto out;
477 	}
478 	fw->fw_fp = fwp;
479 
480 	/*
481 	 * Parse firmware contents
482 	 */
483 
484 	uhdr = (const void *)fw->fw_fp->data;
485 	if (*(const uint32_t *)fw->fw_fp->data != 0
486 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
487 		device_printf(sc->sc_dev, "invalid firmware %s\n",
488 		    sc->sc_fwname);
489 		error = EINVAL;
490 		goto out;
491 	}
492 
493 	sc->sc_fwver = le32toh(uhdr->ver);
494 	data = uhdr->data;
495 	len = fw->fw_fp->datasize - sizeof(*uhdr);
496 
497 	while (len >= sizeof(tlv)) {
498 		size_t tlv_len;
499 		const void *tlv_data;
500 
501 		memcpy(&tlv, data, sizeof(tlv));
502 		tlv_len = le32toh(tlv.length);
503 		tlv_type = le32toh(tlv.type);
504 
505 		len -= sizeof(tlv);
506 		data += sizeof(tlv);
507 		tlv_data = data;
508 
509 		if (len < tlv_len) {
510 			device_printf(sc->sc_dev,
511 			    "firmware too short: %zu bytes\n",
512 			    len);
513 			error = EINVAL;
514 			goto parse_out;
515 		}
516 
517 		switch ((int)tlv_type) {
518 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
519 			if (tlv_len < sizeof(uint32_t)) {
520 				device_printf(sc->sc_dev,
521 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
522 				    __func__,
523 				    (int) tlv_len);
524 				error = EINVAL;
525 				goto parse_out;
526 			}
527 			sc->sc_capa_max_probe_len
528 			    = le32toh(*(const uint32_t *)tlv_data);
529 			/* limit it to something sensible */
530 			if (sc->sc_capa_max_probe_len > (1<<16)) {
531 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
532 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
533 				    "ridiculous\n", __func__);
534 				error = EINVAL;
535 				goto parse_out;
536 			}
537 			break;
538 		case IWM_UCODE_TLV_PAN:
539 			if (tlv_len) {
540 				device_printf(sc->sc_dev,
541 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
542 				    __func__,
543 				    (int) tlv_len);
544 				error = EINVAL;
545 				goto parse_out;
546 			}
547 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
548 			break;
549 		case IWM_UCODE_TLV_FLAGS:
550 			if (tlv_len < sizeof(uint32_t)) {
551 				device_printf(sc->sc_dev,
552 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
553 				    __func__,
554 				    (int) tlv_len);
555 				error = EINVAL;
556 				goto parse_out;
557 			}
558 			/*
559 			 * Apparently there can be many flags, but Linux driver
560 			 * parses only the first one, and so do we.
561 			 *
562 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
563 			 * Intentional or a bug?  Observations from
564 			 * current firmware file:
565 			 *  1) TLV_PAN is parsed first
566 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
567 			 * ==> this resets TLV_PAN to itself... hnnnk
568 			 */
569 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
570 			break;
571 		case IWM_UCODE_TLV_CSCHEME:
572 			if ((error = iwm_store_cscheme(sc,
573 			    tlv_data, tlv_len)) != 0) {
574 				device_printf(sc->sc_dev,
575 				    "%s: iwm_store_cscheme(): returned %d\n",
576 				    __func__,
577 				    error);
578 				goto parse_out;
579 			}
580 			break;
581 		case IWM_UCODE_TLV_NUM_OF_CPU:
582 			if (tlv_len != sizeof(uint32_t)) {
583 				device_printf(sc->sc_dev,
584 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
585 				    __func__,
586 				    (int) tlv_len);
587 				error = EINVAL;
588 				goto parse_out;
589 			}
590 			if (le32toh(*(const uint32_t*)tlv_data) != 1) {
591 				device_printf(sc->sc_dev,
592 				    "%s: driver supports "
593 				    "only TLV_NUM_OF_CPU == 1",
594 				    __func__);
595 				error = EINVAL;
596 				goto parse_out;
597 			}
598 			break;
599 		case IWM_UCODE_TLV_SEC_RT:
600 			if ((error = iwm_firmware_store_section(sc,
601 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
602 				device_printf(sc->sc_dev,
603 				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
604 				    __func__,
605 				    error);
606 				goto parse_out;
607 			}
608 			break;
609 		case IWM_UCODE_TLV_SEC_INIT:
610 			if ((error = iwm_firmware_store_section(sc,
611 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
612 				device_printf(sc->sc_dev,
613 				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
614 				    __func__,
615 				    error);
616 				goto parse_out;
617 			}
618 			break;
619 		case IWM_UCODE_TLV_SEC_WOWLAN:
620 			if ((error = iwm_firmware_store_section(sc,
621 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
622 				device_printf(sc->sc_dev,
623 				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
624 				    __func__,
625 				    error);
626 				goto parse_out;
627 			}
628 			break;
629 		case IWM_UCODE_TLV_DEF_CALIB:
630 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
631 				device_printf(sc->sc_dev,
632 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
633 				    __func__,
634 				    (int) tlv_len,
635 				    (int) sizeof(struct iwm_tlv_calib_data));
636 				error = EINVAL;
637 				goto parse_out;
638 			}
639 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
640 				device_printf(sc->sc_dev,
641 				    "%s: iwm_set_default_calib() failed: %d\n",
642 				    __func__,
643 				    error);
644 				goto parse_out;
645 			}
646 			break;
647 		case IWM_UCODE_TLV_PHY_SKU:
648 			if (tlv_len != sizeof(uint32_t)) {
649 				error = EINVAL;
650 				device_printf(sc->sc_dev,
651 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
652 				    __func__,
653 				    (int) tlv_len);
654 				goto parse_out;
655 			}
656 			sc->sc_fw_phy_config =
657 			    le32toh(*(const uint32_t *)tlv_data);
658 			break;
659 
660 		case IWM_UCODE_TLV_API_CHANGES_SET:
661 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
662 			/* ignore, not used by current driver */
663 			break;
664 
665 		default:
666 			device_printf(sc->sc_dev,
667 			    "%s: unknown firmware section %d, abort\n",
668 			    __func__, tlv_type);
669 			error = EINVAL;
670 			goto parse_out;
671 		}
672 
673 		len -= roundup(tlv_len, 4);
674 		data += roundup(tlv_len, 4);
675 	}
676 
677 	KASSERT(error == 0, ("unhandled error"));
678 
679  parse_out:
680 	if (error) {
681 		device_printf(sc->sc_dev, "firmware parse error %d, "
682 		    "section type %d\n", error, tlv_type);
683 	}
684 
685 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
686 		device_printf(sc->sc_dev,
687 		    "device uses unsupported power ops\n");
688 		error = ENOTSUP;
689 	}
690 
691  out:
692 	if (error) {
693 		fw->fw_status = IWM_FW_STATUS_NONE;
694 		if (fw->fw_fp != NULL)
695 			iwm_fw_info_free(fw);
696 	} else
697 		fw->fw_status = IWM_FW_STATUS_DONE;
698 	wakeup(&sc->sc_fw);
699 
700 	return error;
701 }
702 
703 /*
704  * DMA resource routines
705  */
706 
707 static void
708 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
709 {
710         if (error != 0)
711                 return;
712 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
713         *(bus_addr_t *)arg = segs[0].ds_addr;
714 }
715 
716 static int
717 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
718     bus_size_t size, bus_size_t alignment)
719 {
720 	int error;
721 
722 	dma->tag = NULL;
723 	dma->size = size;
724 	dma->vaddr = NULL;
725 
726 	error = bus_dma_tag_create(tag, alignment,
727             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
728             1, size, 0, NULL, NULL, &dma->tag);
729         if (error != 0)
730                 goto fail;
731 
732         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
733             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
734         if (error != 0)
735                 goto fail;
736 
737         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
738             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
739         if (error != 0) {
740 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
741 		dma->vaddr = NULL;
742                 goto fail;
743 	}
744 
745 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
746 
747 	return 0;
748 
749 fail:	iwm_dma_contig_free(dma);
750 	return error;
751 }
752 
753 static void
754 iwm_dma_contig_free(struct iwm_dma_info *dma)
755 {
756 	if (dma->vaddr != NULL) {
757 		bus_dmamap_sync(dma->tag, dma->map,
758 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
759 		bus_dmamap_unload(dma->tag, dma->map);
760 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
761 		dma->vaddr = NULL;
762 	}
763 	if (dma->tag != NULL) {
764 		bus_dma_tag_destroy(dma->tag);
765 		dma->tag = NULL;
766 	}
767 
768 }
769 
770 /* fwmem is used to load firmware onto the card */
771 static int
772 iwm_alloc_fwmem(struct iwm_softc *sc)
773 {
774 	/* Must be aligned on a 16-byte boundary. */
775 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
776 	    sc->sc_fwdmasegsz, 16);
777 }
778 
779 static void
780 iwm_free_fwmem(struct iwm_softc *sc)
781 {
782 	iwm_dma_contig_free(&sc->fw_dma);
783 }
784 
785 /* tx scheduler rings.  not used? */
786 static int
787 iwm_alloc_sched(struct iwm_softc *sc)
788 {
789 	int rv;
790 
791 	/* TX scheduler rings must be aligned on a 1KB boundary. */
792 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
793 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
794 	return rv;
795 }
796 
797 static void
798 iwm_free_sched(struct iwm_softc *sc)
799 {
800 	iwm_dma_contig_free(&sc->sched_dma);
801 }
802 
803 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
804 static int
805 iwm_alloc_kw(struct iwm_softc *sc)
806 {
807 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
808 }
809 
810 static void
811 iwm_free_kw(struct iwm_softc *sc)
812 {
813 	iwm_dma_contig_free(&sc->kw_dma);
814 }
815 
816 /* interrupt cause table */
817 static int
818 iwm_alloc_ict(struct iwm_softc *sc)
819 {
820 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
821 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
822 }
823 
824 static void
825 iwm_free_ict(struct iwm_softc *sc)
826 {
827 	iwm_dma_contig_free(&sc->ict_dma);
828 }
829 
830 static int
831 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
832 {
833 	bus_size_t size;
834 	int i, error;
835 
836 	ring->cur = 0;
837 
838 	/* Allocate RX descriptors (256-byte aligned). */
839 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
840 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
841 	if (error != 0) {
842 		device_printf(sc->sc_dev,
843 		    "could not allocate RX ring DMA memory\n");
844 		goto fail;
845 	}
846 	ring->desc = ring->desc_dma.vaddr;
847 
848 	/* Allocate RX status area (16-byte aligned). */
849 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
850 	    sizeof(*ring->stat), 16);
851 	if (error != 0) {
852 		device_printf(sc->sc_dev,
853 		    "could not allocate RX status DMA memory\n");
854 		goto fail;
855 	}
856 	ring->stat = ring->stat_dma.vaddr;
857 
858         /* Create RX buffer DMA tag. */
859         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
860             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
861             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
862         if (error != 0) {
863                 device_printf(sc->sc_dev,
864                     "%s: could not create RX buf DMA tag, error %d\n",
865                     __func__, error);
866                 goto fail;
867         }
868 
869 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
870 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
871 	if (error != 0) {
872 		device_printf(sc->sc_dev,
873 		    "%s: could not create RX buf DMA map, error %d\n",
874 		    __func__, error);
875 		goto fail;
876 	}
877 	/*
878 	 * Allocate and map RX buffers.
879 	 */
880 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
881 		struct iwm_rx_data *data = &ring->data[i];
882 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
883 		if (error != 0) {
884 			device_printf(sc->sc_dev,
885 			    "%s: could not create RX buf DMA map, error %d\n",
886 			    __func__, error);
887 			goto fail;
888 		}
889 		data->m = NULL;
890 
891 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
892 			goto fail;
893 		}
894 	}
895 	return 0;
896 
897 fail:	iwm_free_rx_ring(sc, ring);
898 	return error;
899 }
900 
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{

	/* XXX print out if we can't lock the NIC? */
	if (!iwm_nic_lock(sc))
		return;

	/* XXX handle if RX stop doesn't finish? */
	(void) iwm_pcie_rx_stop(sc);
	iwm_nic_unlock(sc);
}
912 
913 static void
914 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
915 {
916 	/* Reset the ring state */
917 	ring->cur = 0;
918 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
919 }
920 
921 static void
922 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
923 {
924 	int i;
925 
926 	iwm_dma_contig_free(&ring->desc_dma);
927 	iwm_dma_contig_free(&ring->stat_dma);
928 
929 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
930 		struct iwm_rx_data *data = &ring->data[i];
931 
932 		if (data->m != NULL) {
933 			bus_dmamap_sync(ring->data_dmat, data->map,
934 			    BUS_DMASYNC_POSTREAD);
935 			bus_dmamap_unload(ring->data_dmat, data->map);
936 			m_freem(data->m);
937 			data->m = NULL;
938 		}
939 		if (data->map != NULL) {
940 			bus_dmamap_destroy(ring->data_dmat, data->map);
941 			data->map = NULL;
942 		}
943 	}
944 	if (ring->spare_map != NULL) {
945 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
946 		ring->spare_map = NULL;
947 	}
948 	if (ring->data_dmat != NULL) {
949 		bus_dma_tag_destroy(ring->data_dmat);
950 		ring->data_dmat = NULL;
951 	}
952 }
953 
954 static int
955 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
956 {
957 	bus_addr_t paddr;
958 	bus_size_t size;
959 	size_t maxsize;
960 	int nsegments;
961 	int i, error;
962 
963 	ring->qid = qid;
964 	ring->queued = 0;
965 	ring->cur = 0;
966 
967 	/* Allocate TX descriptors (256-byte aligned). */
968 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
969 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
970 	if (error != 0) {
971 		device_printf(sc->sc_dev,
972 		    "could not allocate TX ring DMA memory\n");
973 		goto fail;
974 	}
975 	ring->desc = ring->desc_dma.vaddr;
976 
977 	/*
978 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
979 	 * to allocate commands space for other rings.
980 	 */
981 	if (qid > IWM_MVM_CMD_QUEUE)
982 		return 0;
983 
984 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
985 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
986 	if (error != 0) {
987 		device_printf(sc->sc_dev,
988 		    "could not allocate TX cmd DMA memory\n");
989 		goto fail;
990 	}
991 	ring->cmd = ring->cmd_dma.vaddr;
992 
993 	/* FW commands may require more mapped space than packets. */
994 	if (qid == IWM_MVM_CMD_QUEUE) {
995 		maxsize = IWM_RBUF_SIZE;
996 		nsegments = 1;
997 	} else {
998 		maxsize = MCLBYTES;
999 		nsegments = IWM_MAX_SCATTER - 2;
1000 	}
1001 
1002 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1003 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1004             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1005 	if (error != 0) {
1006 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1007 		goto fail;
1008 	}
1009 
1010 	paddr = ring->cmd_dma.paddr;
1011 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1012 		struct iwm_tx_data *data = &ring->data[i];
1013 
1014 		data->cmd_paddr = paddr;
1015 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1016 		    + offsetof(struct iwm_tx_cmd, scratch);
1017 		paddr += sizeof(struct iwm_device_cmd);
1018 
1019 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1020 		if (error != 0) {
1021 			device_printf(sc->sc_dev,
1022 			    "could not create TX buf DMA map\n");
1023 			goto fail;
1024 		}
1025 	}
1026 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1027 	    ("invalid physical address"));
1028 	return 0;
1029 
1030 fail:	iwm_free_tx_ring(sc, ring);
1031 	return error;
1032 }
1033 
1034 static void
1035 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1036 {
1037 	int i;
1038 
1039 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1040 		struct iwm_tx_data *data = &ring->data[i];
1041 
1042 		if (data->m != NULL) {
1043 			bus_dmamap_sync(ring->data_dmat, data->map,
1044 			    BUS_DMASYNC_POSTWRITE);
1045 			bus_dmamap_unload(ring->data_dmat, data->map);
1046 			m_freem(data->m);
1047 			data->m = NULL;
1048 		}
1049 	}
1050 	/* Clear TX descriptors. */
1051 	memset(ring->desc, 0, ring->desc_dma.size);
1052 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1053 	    BUS_DMASYNC_PREWRITE);
1054 	sc->qfullmsk &= ~(1 << ring->qid);
1055 	ring->queued = 0;
1056 	ring->cur = 0;
1057 }
1058 
1059 static void
1060 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1061 {
1062 	int i;
1063 
1064 	iwm_dma_contig_free(&ring->desc_dma);
1065 	iwm_dma_contig_free(&ring->cmd_dma);
1066 
1067 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1068 		struct iwm_tx_data *data = &ring->data[i];
1069 
1070 		if (data->m != NULL) {
1071 			bus_dmamap_sync(ring->data_dmat, data->map,
1072 			    BUS_DMASYNC_POSTWRITE);
1073 			bus_dmamap_unload(ring->data_dmat, data->map);
1074 			m_freem(data->m);
1075 			data->m = NULL;
1076 		}
1077 		if (data->map != NULL) {
1078 			bus_dmamap_destroy(ring->data_dmat, data->map);
1079 			data->map = NULL;
1080 		}
1081 	}
1082 	if (ring->data_dmat != NULL) {
1083 		bus_dma_tag_destroy(ring->data_dmat);
1084 		ring->data_dmat = NULL;
1085 	}
1086 }
1087 
1088 /*
1089  * High-level hardware frobbing routines
1090  */
1091 
/* Enable the full "init set" of interrupts and remember the mask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	/* Cache the mask so iwm_restore_interrupts() can re-apply it. */
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1098 
/* Re-program the last interrupt mask cached in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1104 
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts (both CSR and flow-handler status) */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1115 
/*
 * Reset the interrupt cause table (ICT) and switch the driver to
 * ICT-based interrupt handling.  Interrupts are disabled around the
 * table reset and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts, acking anything that fired meanwhile. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1138 
1139 /* iwlwifi pcie/trans.c */
1140 
1141 /*
1142  * Since this .. hard-resets things, it's time to actually
1143  * mark the first vap (if any) as having no mac context.
1144  * It's annoying, but since the driver is potentially being
1145  * stop/start'ed whilst active (thanks openbsd port!) we
1146  * have to correctly track this.
1147  */
/*
 * Fully stop the NIC: quiesce interrupts and DMA, reset the rings,
 * power down the busmaster clocks and the APM, and reset the on-board
 * processor.  The RF-kill interrupt is re-armed at the end so state
 * changes of the kill switch are still seen while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to ~4ms (200 * 20us) for channel idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1229 
1230 /* iwlwifi: mvm/ops.c */
/*
 * Program the HW interface configuration register from the MAC
 * revision (step/dash) and the radio configuration advertised by
 * the firmware PHY config.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio type/step/dash fields from sc_fw_phy_config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1270 
/*
 * Program the flow handler's RX side: point it at the RX descriptor
 * ring and status area, then enable the RX DMA channel.  Requires the
 * NIC lock; returns EBUSY if it cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1329 
/*
 * Program the flow handler's TX side: the "keep warm" page and the
 * per-queue descriptor ring base addresses.  The TX scheduler is left
 * deactivated.  Requires the NIC lock; returns EBUSY if unavailable.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1361 
1362 static int
1363 iwm_nic_init(struct iwm_softc *sc)
1364 {
1365 	int error;
1366 
1367 	iwm_apm_init(sc);
1368 	iwm_set_pwr(sc);
1369 
1370 	iwm_mvm_nic_config(sc);
1371 
1372 	if ((error = iwm_nic_rx_init(sc)) != 0)
1373 		return error;
1374 
1375 	/*
1376 	 * Ditto for TX, from iwn
1377 	 */
1378 	if ((error = iwm_nic_tx_init(sc)) != 0)
1379 		return error;
1380 
1381 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1382 	    "%s: shadow registers enabled\n", __func__);
1383 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1384 
1385 	return 0;
1386 }
1387 
/* TX FIFO to use for each access category (VO/VI/BE/BK), indexed by AC. */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1394 
/*
 * Activate TX queue 'qid' in the scheduler and bind it to the given
 * TX FIFO.  The queue is deactivated, its pointers and SRAM context
 * are reset, the window size / frame limit are programmed, and it is
 * finally marked active.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* The command queue is never chained. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset the write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	/* Clear the queue's scheduler context in SRAM. */
	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Finally mark the queue active on the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1442 
1443 static int
1444 iwm_post_alive(struct iwm_softc *sc)
1445 {
1446 	int nwords;
1447 	int error, chnl;
1448 
1449 	if (!iwm_nic_lock(sc))
1450 		return EBUSY;
1451 
1452 	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
1453 		device_printf(sc->sc_dev,
1454 		    "%s: sched addr mismatch",
1455 		    __func__);
1456 		error = EINVAL;
1457 		goto out;
1458 	}
1459 
1460 	iwm_ict_reset(sc);
1461 
1462 	/* Clear TX scheduler state in SRAM. */
1463 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1464 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1465 	    / sizeof(uint32_t);
1466 	error = iwm_write_mem(sc,
1467 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1468 	    NULL, nwords);
1469 	if (error)
1470 		goto out;
1471 
1472 	/* Set physical address of TX scheduler rings (1KB aligned). */
1473 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1474 
1475 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1476 
1477 	/* enable command channel */
1478 	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
1479 
1480 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1481 
1482 	/* Enable DMA channels. */
1483 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1484 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1485 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1486 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1487 	}
1488 
1489 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1490 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1491 
1492 	/* Enable L1-Active */
1493 	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1494 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1495 
1496  out:
1497  	iwm_nic_unlock(sc);
1498 	return error;
1499 }
1500 
1501 /*
1502  * NVM read access and content parsing.  We do not support
1503  * external NVM or writing NVM.
1504  * iwlwifi/mvm/nvm.c
1505  */
1506 
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};

/* Default NVM chunk size to request per IWM_NVM_ACCESS_CMD, in bytes */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
1521 
1522 static int
1523 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1524 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1525 {
1526 	offset = 0;
1527 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1528 		.offset = htole16(offset),
1529 		.length = htole16(length),
1530 		.type = htole16(section),
1531 		.op_code = IWM_NVM_READ_OPCODE,
1532 	};
1533 	struct iwm_nvm_access_resp *nvm_resp;
1534 	struct iwm_rx_packet *pkt;
1535 	struct iwm_host_cmd cmd = {
1536 		.id = IWM_NVM_ACCESS_CMD,
1537 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1538 		    IWM_CMD_SEND_IN_RFKILL,
1539 		.data = { &nvm_access_cmd, },
1540 	};
1541 	int ret, bytes_read, offset_read;
1542 	uint8_t *resp_data;
1543 
1544 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1545 
1546 	ret = iwm_send_cmd(sc, &cmd);
1547 	if (ret)
1548 		return ret;
1549 
1550 	pkt = cmd.resp_pkt;
1551 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1552 		device_printf(sc->sc_dev,
1553 		    "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1554 		    __func__, pkt->hdr.flags);
1555 		ret = EIO;
1556 		goto exit;
1557 	}
1558 
1559 	/* Extract NVM response */
1560 	nvm_resp = (void *)pkt->data;
1561 
1562 	ret = le16toh(nvm_resp->status);
1563 	bytes_read = le16toh(nvm_resp->length);
1564 	offset_read = le16toh(nvm_resp->offset);
1565 	resp_data = nvm_resp->data;
1566 	if (ret) {
1567 		device_printf(sc->sc_dev,
1568 		    "%s: NVM access command failed with status %d\n",
1569 		    __func__, ret);
1570 		ret = EINVAL;
1571 		goto exit;
1572 	}
1573 
1574 	if (offset_read != offset) {
1575 		device_printf(sc->sc_dev,
1576 		    "%s: NVM ACCESS response with invalid offset %d\n",
1577 		    __func__, offset_read);
1578 		ret = EINVAL;
1579 		goto exit;
1580 	}
1581 
1582 	memcpy(data + offset, resp_data, bytes_read);
1583 	*len = bytes_read;
1584 
1585  exit:
1586 	iwm_free_resp(sc, &cmd);
1587 	return ret;
1588 }
1589 
1590 /*
1591  * Reads an NVM section completely.
1592  * NICs prior to 7000 family doesn't have a real NVM, but just read
1593  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1594  * by uCode, we need to manually check in this case that we don't
1595  * overflow and try to read more than the EEPROM size.
1596  * For 7000 family NICs, we supply the maximal size we can read, and
1597  * the uCode fills the response with as much data as we can,
1598  * without overflowing, so no check is needed.
1599  */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/*
	 * Read the NVM until exhausted (reading less than requested).
	 * NOTE(review): 'data' has no explicit capacity here; the caller
	 * passes an IWM_OTP_LOW_IMAGE_SIZE buffer and this loop relies on
	 * sections fitting in it — worth adding a bound if sizes grow.
	 */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			device_printf(sc->sc_dev,
			    "Cannot read NVM from section "
			    "%d offset %d, length %d\n",
			    section, *len, length);
			return error;
		}
		/* Accumulate the total number of bytes read so far. */
		*len += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "NVM section %d read completed\n", section);
	return 0;
}
1629 
1630 /*
1631  * BEGIN IWM_NVM_PARSE
1632  */
1633 
1634 /* iwlwifi/iwl-nvm-parse.c */
1635 
1636 /* NVM offsets (in words) definitions */
/* NVM offsets (in words) definitions; offsets are relative to the
 * start of the section they belong to. */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1653 
1654 /* SKU Capabilities (actual values from NVM definition) */
/* SKU Capabilities (actual values from NVM definition); tested against
 * the IWM_SKU word of the SW section in iwm_parse_nvm_data(). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1661 
1662 /* radio config bits (actual values from NVM definition) */
1663 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1664 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1665 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1666 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1667 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1668 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1669 
1670 #define DEFAULT_MAX_TX_POWER 16
1671 
1672 /**
1673  * enum iwm_nvm_channel_flags - channel flags in NVM
1674  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1675  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1676  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1677  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1678  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1679  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1680  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1681  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1682  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1683  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1684  */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* NB: bit 2 is not defined here; see the doxygen block above */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1696 
1697 /*
1698  * Translate EEPROM flags to net80211.
1699  */
1700 static uint32_t
1701 iwm_eeprom_channel_flags(uint16_t ch_flags)
1702 {
1703 	uint32_t nflags;
1704 
1705 	nflags = 0;
1706 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1707 		nflags |= IEEE80211_CHAN_PASSIVE;
1708 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1709 		nflags |= IEEE80211_CHAN_NOADHOC;
1710 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1711 		nflags |= IEEE80211_CHAN_DFS;
1712 		/* Just in case. */
1713 		nflags |= IEEE80211_CHAN_NOADHOC;
1714 	}
1715 
1716 	return (nflags);
1717 }
1718 
/*
 * Add the NVM channels in [ch_idx, ch_num) to the net80211 channel
 * list 'chans' (at most 'maxchans' entries; *nchans is updated).
 * Channels without the VALID flag are skipped.  'bands' selects which
 * PHY modes each channel is added for.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		ieee = iwm_nvm_channels[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		/* Stop once the channel list is full. */
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
1755 
/*
 * net80211 ic_getradiocaps callback: build the channel list from the
 * NVM channel table — 2 GHz channels 1-13 as 11b/g, channel 14 as
 * 11b only, and the 5 GHz channels as 11a when the SKU enables them.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
	}
}
1783 
/*
 * Parse the raw HW/SW/calibration NVM sections into sc->sc_nvm:
 * version, radio config, SKU capabilities, MAC address and the
 * per-channel flag table.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately forced off regardless of the NVM SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): unlike the other fields these are read without
	 * le16_to_cpup(); presumably kept in wire byte order on purpose,
	 * but worth confirming on big-endian hosts.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	/* Raw (still little-endian) per-channel flags; converted on use. */
	memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
	    sizeof(data->nvm_ch_flags));
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1830 
1831 /*
1832  * END NVM PARSE
1833  */
1834 
/* One NVM section as read from the device (see iwm_nvm_init()). */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* malloc'd copy of the section contents */
};
1839 
/*
 * Validate that the required sections are present and hand them to
 * iwm_parse_nvm_data().  Returns ENOENT if SW or HW is missing.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib;

	/* Checking for required sections */
	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
		device_printf(sc->sc_dev,
		    "%s: Can't parse empty NVM sections\n",
		    __func__);
		return ENOENT;
	}

	/*
	 * NOTE(review): the CALIBRATION section is not checked here;
	 * iwm_parse_nvm_data() dereferences it, so this relies on
	 * iwm_nvm_init() having successfully read it (it is in
	 * nvm_to_read and any read failure aborts before parsing).
	 */
	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	return iwm_parse_nvm_data(sc, hw, sw, calib,
	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
}
1860 
1861 static int
1862 iwm_nvm_init(struct iwm_softc *sc)
1863 {
1864 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1865 	int i, section, error;
1866 	uint16_t len;
1867 	uint8_t *nvm_buffer, *temp;
1868 
1869 	/* Read From FW NVM */
1870 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1871 	    "%s: Read NVM\n",
1872 	    __func__);
1873 
1874 	memset(nvm_sections, 0, sizeof(nvm_sections));
1875 
1876 	/* TODO: find correct NVM max size for a section */
1877 	nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_NOWAIT);
1878 	if (nvm_buffer == NULL)
1879 		return (ENOMEM);
1880 	for (i = 0; i < nitems(nvm_to_read); i++) {
1881 		section = nvm_to_read[i];
1882 		KASSERT(section <= nitems(nvm_sections),
1883 		    ("too many sections"));
1884 
1885 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1886 		if (error)
1887 			break;
1888 
1889 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
1890 		if (temp == NULL) {
1891 			error = ENOMEM;
1892 			break;
1893 		}
1894 		memcpy(temp, nvm_buffer, len);
1895 		nvm_sections[section].data = temp;
1896 		nvm_sections[section].length = len;
1897 	}
1898 	free(nvm_buffer, M_DEVBUF);
1899 	if (error == 0)
1900 		error = iwm_parse_nvm_sections(sc, nvm_sections);
1901 
1902 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
1903 		if (nvm_sections[i].data != NULL)
1904 			free(nvm_sections[i].data, M_DEVBUF);
1905 	}
1906 
1907 	return error;
1908 }
1909 
1910 /*
1911  * Firmware loading gunk.  This is kind of a weird hybrid between the
1912  * iwn driver and the Linux iwlwifi driver.
1913  */
1914 
1915 static int
1916 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
1917 	const uint8_t *section, uint32_t byte_cnt)
1918 {
1919 	struct iwm_dma_info *dma = &sc->fw_dma;
1920 	int error;
1921 
1922 	/* Copy firmware section into pre-allocated DMA-safe memory. */
1923 	memcpy(dma->vaddr, section, byte_cnt);
1924 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1925 
1926 	if (!iwm_nic_lock(sc))
1927 		return EBUSY;
1928 
1929 	sc->sc_fw_chunk_done = 0;
1930 
1931 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
1932 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
1933 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
1934 	    dst_addr);
1935 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
1936 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
1937 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
1938 	    (iwm_get_dma_hi_addr(dma->paddr)
1939 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
1940 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
1941 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1942 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
1943 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
1944 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
1945 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
1946 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
1947 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
1948 
1949 	iwm_nic_unlock(sc);
1950 
1951 	/* wait 1s for this segment to load */
1952 	while (!sc->sc_fw_chunk_done)
1953 		if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
1954 			break;
1955 
1956 	return error;
1957 }
1958 
1959 static int
1960 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1961 {
1962 	struct iwm_fw_sects *fws;
1963 	int error, i, w;
1964 	const void *data;
1965 	uint32_t dlen;
1966 	uint32_t offset;
1967 
1968 	sc->sc_uc.uc_intr = 0;
1969 
1970 	fws = &sc->sc_fw.fw_sects[ucode_type];
1971 	for (i = 0; i < fws->fw_count; i++) {
1972 		data = fws->fw_sect[i].fws_data;
1973 		dlen = fws->fw_sect[i].fws_len;
1974 		offset = fws->fw_sect[i].fws_devoff;
1975 		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
1976 		    "LOAD FIRMWARE type %d offset %u len %d\n",
1977 		    ucode_type, offset, dlen);
1978 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
1979 		if (error) {
1980 			device_printf(sc->sc_dev,
1981 			    "%s: chunk %u of %u returned error %02d\n",
1982 			    __func__, i, fws->fw_count, error);
1983 			return error;
1984 		}
1985 	}
1986 
1987 	/* wait for the firmware to load */
1988 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
1989 
1990 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
1991 		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
1992 	}
1993 
1994 	return error;
1995 }
1996 
/* iwlwifi: pcie/trans.c */
/*
 * Initialize the NIC, clear the rfkill handshake state, enable host
 * interrupts, and load the 'ucode_type' firmware image.
 * Returns 0 on success or an errno from iwm_nic_init()/iwm_load_firmware().
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Clear any pending interrupt causes before touching the NIC. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2027 
2028 static int
2029 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2030 {
2031 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2032 		.valid = htole32(valid_tx_ant),
2033 	};
2034 
2035 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2036 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2037 }
2038 
2039 /* iwlwifi: mvm/fw.c */
2040 static int
2041 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2042 {
2043 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2044 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2045 
2046 	/* Set parameters */
2047 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2048 	phy_cfg_cmd.calib_control.event_trigger =
2049 	    sc->sc_default_calib[ucode_type].event_trigger;
2050 	phy_cfg_cmd.calib_control.flow_trigger =
2051 	    sc->sc_default_calib[ucode_type].flow_trigger;
2052 
2053 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2054 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2055 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2056 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2057 }
2058 
2059 static int
2060 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2061 	enum iwm_ucode_type ucode_type)
2062 {
2063 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2064 	int error;
2065 
2066 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
2067 		return error;
2068 
2069 	sc->sc_uc_current = ucode_type;
2070 	error = iwm_start_fw(sc, ucode_type);
2071 	if (error) {
2072 		sc->sc_uc_current = old_type;
2073 		return error;
2074 	}
2075 
2076 	return iwm_post_alive(sc);
2077 }
2078 
2079 /*
2080  * mvm misc bits
2081  */
2082 
2083 /*
2084  * follows iwlwifi/fw.c
2085  */
/*
 * Boot the INIT firmware image.
 *
 * With 'justnvm' set, only read/parse the NVM (MAC address, scan
 * capabilities) and allocate the scan command buffer, then return.
 * Otherwise send the TX-antenna and PHY configuration commands and
 * sleep until the firmware posts its init-complete notification
 * (2 second msleep timeout per wait).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		device_printf(sc->sc_dev, "failed to load init firmware\n");
		return error;
	}

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		/*
		 * Size the scan command for the worst case: fixed header,
		 * maximum probe request, one entry per scannable channel.
		 * NOTE(review): sc_scan_cmd is not freed before this
		 * allocation, so entering this path twice would leak the
		 * previous buffer -- verify callers only do this once.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
		    M_NOWAIT);
		if (sc->sc_scan_cmd == NULL)
			return (ENOMEM);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	* Send phy configurations command to init uCode
	* to start the 16.0 uCode init image internal calibrations.
	*/
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2150 
2151 /*
2152  * receive side
2153  */
2154 
2155 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a fresh receive cluster for RX ring slot 'idx', map it for
 * DMA, and point the hardware descriptor at it.  Returns 0 on success,
 * ENOBUFS if no mbuf cluster is available, or a busdma error.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	int error;
	bus_addr_t paddr;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new cluster into the spare map first, so the slot's
	 * current mapping survives if the load fails.
	 * NOTE(review): EFBIG is tolerated here but no defragmentation
	 * follows -- presumably a single cluster always maps to one DMA
	 * segment; confirm against the tag's segment limits.
	 */
	error = bus_dmamap_load(ring->data_dmat, ring->spare_map,
	    mtod(m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
	    &paddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The descriptor stores the physical address shifted right 8 bits. */
	ring->desc[idx] = htole32(paddr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_free(m);
	return error;
}
2201 
2202 /* iwlwifi: mvm/rx.c */
2203 #define IWM_RSSI_OFFSET 50
2204 static int
2205 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2206 {
2207 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2208 	uint32_t agc_a, agc_b;
2209 	uint32_t val;
2210 
2211 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2212 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2213 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2214 
2215 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2216 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2217 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2218 
2219 	/*
2220 	 * dBm = rssi dB - agc dB - constant.
2221 	 * Higher AGC (higher radio gain) means lower signal.
2222 	 */
2223 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2224 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2225 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2226 
2227 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2228 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2229 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2230 
2231 	return max_rssi_dbm;
2232 }
2233 
2234 /* iwlwifi: mvm/rx.c */
2235 /*
2236  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2237  * values are reported by the fw as positive values - need to negate
2238  * to obtain their dBM.  Account for missing antennas by replacing 0
2239  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2240  */
2241 static int
2242 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2243 {
2244 	int energy_a, energy_b, energy_c, max_energy;
2245 	uint32_t val;
2246 
2247 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2248 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2249 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2250 	energy_a = energy_a ? -energy_a : -256;
2251 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2252 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2253 	energy_b = energy_b ? -energy_b : -256;
2254 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2255 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2256 	energy_c = energy_c ? -energy_c : -256;
2257 	max_energy = MAX(energy_a, energy_b);
2258 	max_energy = MAX(max_energy, energy_c);
2259 
2260 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2261 	    "energy In A %d B %d C %d , and max %d\n",
2262 	    energy_a, energy_b, energy_c, max_energy);
2263 
2264 	return max_energy;
2265 }
2266 
2267 static void
2268 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2269 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2270 {
2271 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2272 
2273 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2274 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2275 
2276 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2277 }
2278 
2279 /*
2280  * Retrieve the average noise (in dBm) among receivers.
2281  */
2282 static int
2283 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2284 {
2285 	int i, total, nbant, noise;
2286 
2287 	total = nbant = noise = 0;
2288 	for (i = 0; i < 3; i++) {
2289 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2290 		if (noise) {
2291 			total += noise;
2292 			nbant++;
2293 		}
2294 	}
2295 
2296 	/* There should be at least one antenna but check anyway. */
2297 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2298 }
2299 
2300 /*
2301  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2302  *
2303  * Handles the actual data of the Rx packet from the fw
2304  */
2305 static void
2306 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2307 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2308 {
2309 	struct ieee80211com *ic = &sc->sc_ic;
2310 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2311 	struct ieee80211_frame *wh;
2312 	struct ieee80211_node *ni;
2313 	struct ieee80211_rx_stats rxs;
2314 	struct mbuf *m;
2315 	struct iwm_rx_phy_info *phy_info;
2316 	struct iwm_rx_mpdu_res_start *rx_res;
2317 	uint32_t len;
2318 	uint32_t rx_pkt_status;
2319 	int rssi;
2320 
2321 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2322 
2323 	phy_info = &sc->sc_last_phy_info;
2324 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2325 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2326 	len = le16toh(rx_res->byte_count);
2327 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2328 
2329 	m = data->m;
2330 	m->m_data = pkt->data + sizeof(*rx_res);
2331 	m->m_pkthdr.len = m->m_len = len;
2332 
2333 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2334 		device_printf(sc->sc_dev,
2335 		    "dsp size out of range [0,20]: %d\n",
2336 		    phy_info->cfg_phy_cnt);
2337 		return;
2338 	}
2339 
2340 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2341 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2342 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2343 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2344 		return; /* drop */
2345 	}
2346 
2347 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2348 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2349 	} else {
2350 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
2351 	}
2352 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
2353 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
2354 
2355 	/* replenish ring for the buffer we're going to feed to the sharks */
2356 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2357 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2358 		    __func__);
2359 		return;
2360 	}
2361 
2362 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2363 
2364 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2365 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
2366 	    __func__,
2367 	    le16toh(phy_info->channel),
2368 	    le16toh(phy_info->phy_flags));
2369 
2370 	/*
2371 	 * Populate an RX state struct with the provided information.
2372 	 */
2373 	bzero(&rxs, sizeof(rxs));
2374 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2375 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2376 	rxs.c_ieee = le16toh(phy_info->channel);
2377 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2378 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2379 	} else {
2380 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2381 	}
2382 	rxs.rssi = rssi - sc->sc_noise;
2383 	rxs.nf = sc->sc_noise;
2384 
2385 	if (ieee80211_radiotap_active_vap(vap)) {
2386 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2387 
2388 		tap->wr_flags = 0;
2389 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2390 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2391 		tap->wr_chan_freq = htole16(rxs.c_freq);
2392 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2393 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2394 		tap->wr_dbm_antsignal = (int8_t)rssi;
2395 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2396 		tap->wr_tsft = phy_info->system_timestamp;
2397 		switch (phy_info->rate) {
2398 		/* CCK rates. */
2399 		case  10: tap->wr_rate =   2; break;
2400 		case  20: tap->wr_rate =   4; break;
2401 		case  55: tap->wr_rate =  11; break;
2402 		case 110: tap->wr_rate =  22; break;
2403 		/* OFDM rates. */
2404 		case 0xd: tap->wr_rate =  12; break;
2405 		case 0xf: tap->wr_rate =  18; break;
2406 		case 0x5: tap->wr_rate =  24; break;
2407 		case 0x7: tap->wr_rate =  36; break;
2408 		case 0x9: tap->wr_rate =  48; break;
2409 		case 0xb: tap->wr_rate =  72; break;
2410 		case 0x1: tap->wr_rate =  96; break;
2411 		case 0x3: tap->wr_rate = 108; break;
2412 		/* Unknown rate: should not happen. */
2413 		default:  tap->wr_rate =   0;
2414 		}
2415 	}
2416 
2417 	IWM_UNLOCK(sc);
2418 	if (ni != NULL) {
2419 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2420 		ieee80211_input_mimo(ni, m, &rxs);
2421 		ieee80211_free_node(ni);
2422 	} else {
2423 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2424 		ieee80211_input_mimo_all(ic, m, &rxs);
2425 	}
2426 	IWM_LOCK(sc);
2427 }
2428 
2429 static int
2430 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2431 	struct iwm_node *in)
2432 {
2433 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2434 	struct ieee80211_node *ni = &in->in_ni;
2435 	struct ieee80211vap *vap = ni->ni_vap;
2436 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2437 	int failack = tx_resp->failure_frame;
2438 
2439 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2440 
2441 	/* Update rate control statistics. */
2442 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
2443 	    __func__,
2444 	    (int) le16toh(tx_resp->status.status),
2445 	    (int) le16toh(tx_resp->status.sequence),
2446 	    tx_resp->frame_count,
2447 	    tx_resp->bt_kill_count,
2448 	    tx_resp->failure_rts,
2449 	    tx_resp->failure_frame,
2450 	    le32toh(tx_resp->initial_rate),
2451 	    (int) le16toh(tx_resp->wireless_media_time));
2452 
2453 	if (status != IWM_TX_STATUS_SUCCESS &&
2454 	    status != IWM_TX_STATUS_DIRECT_DONE) {
2455 		ieee80211_ratectl_tx_complete(vap, ni,
2456 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2457 		return (1);
2458 	} else {
2459 		ieee80211_ratectl_tx_complete(vap, ni,
2460 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2461 		return (0);
2462 	}
2463 }
2464 
/*
 * Handle a TX-complete notification: report the result to rate control,
 * release the frame's DMA mapping and mbuf, and restart transmission if
 * the ring drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* The hardware responded, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Mark the slot free before completing, so it can be reused. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* status != 0 counts as an output error for the node's stats. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
2512 
2513 /*
2514  * transmit side
2515  */
2516 
2517 /*
2518  * Process a "command done" firmware notification.  This is where we wakeup
2519  * processes waiting for a synchronous command completion.
2520  * from if_iwn
2521  */
2522 static void
2523 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2524 {
2525 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2526 	struct iwm_tx_data *data;
2527 
2528 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2529 		return;	/* Not a command ack. */
2530 	}
2531 
2532 	data = &ring->data[pkt->hdr.idx];
2533 
2534 	/* If the command was mapped in an mbuf, free it. */
2535 	if (data->m != NULL) {
2536 		bus_dmamap_sync(ring->data_dmat, data->map,
2537 		    BUS_DMASYNC_POSTWRITE);
2538 		bus_dmamap_unload(ring->data_dmat, data->map);
2539 		m_freem(data->m);
2540 		data->m = NULL;
2541 	}
2542 	wakeup(&ring->desc[pkt->hdr.idx]);
2543 }
2544 
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Mirror the length of the frame at (qid, idx) into the byte-count
 * table the firmware's TX scheduler reads.  Disabled (#if 0) -- only
 * needed once block-ack mode is supported.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry encodes the station id in the top 4 bits. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
2577 
2578 /*
2579  * Take an 802.11 (non-n) rate, find the relevant rate
2580  * table entry.  return the index into in_ridx[].
2581  *
2582  * The caller then uses that index back into in_ridx
2583  * to figure out the rate index programmed /into/
2584  * the firmware for this given node.
2585  */
2586 static int
2587 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2588     uint8_t rate)
2589 {
2590 	int i;
2591 	uint8_t r;
2592 
2593 	for (i = 0; i < nitems(in->in_ridx); i++) {
2594 		r = iwm_rates[in->in_ridx[i]].rate;
2595 		if (rate == r)
2596 			return (i);
2597 	}
2598 	/* XXX Return the first */
2599 	/* XXX TODO: have it return the /lowest/ */
2600 	return (0);
2601 }
2602 
2603 /*
2604  * Fill in the rate related information for a transmit command.
2605  */
/*
 * Choose the rate table entry for the frame described by 'wh' and fill
 * in the rate/retry fields of the TX command 'tx'.  Returns the chosen
 * iwm_rates[] entry (used by the caller for radiotap).
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		/* Let ratectl pick ni_txrate, then map it to our table. */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
2676 
#define TB0_SIZE 16
/*
 * Queue the 802.11 frame 'm' for node 'ni' on TX ring 'ac'.
 *
 * Builds the TX command (rate, flags, copied 802.11 header), maps the
 * frame payload for DMA, fills the TFD descriptor, and kicks the ring.
 * Consumes 'm' on error.  Returns 0 on success or an errno.
 *
 * The caller holds the driver lock (ring state is modified throughout).
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames require an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): this requests RTS/CTS protection for *non-data*
	 * frames above the RTS threshold.  The threshold conventionally
	 * applies to data frames, so the 'type !=' looks inverted --
	 * verify against iwlwifi/net80211 expectations.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and management frames go out via the aux station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Association frames get a slightly longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header was copied into the command above, so drop it here. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%d\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    (int) tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0 and 1 carry the TX command; payload segments follow. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush data, command, and descriptor before the doorbell write. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
2891 
2892 static int
2893 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2894     const struct ieee80211_bpf_params *params)
2895 {
2896 	struct ieee80211com *ic = ni->ni_ic;
2897 	struct iwm_softc *sc = ic->ic_softc;
2898 	int error = 0;
2899 
2900 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2901 	    "->%s begin\n", __func__);
2902 
2903 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
2904 		m_freem(m);
2905 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2906 		    "<-%s not RUNNING\n", __func__);
2907 		return (ENETDOWN);
2908         }
2909 
2910 	IWM_LOCK(sc);
2911 	/* XXX fix this */
2912         if (params == NULL) {
2913 		error = iwm_tx(sc, m, ni, 0);
2914 	} else {
2915 		error = iwm_tx(sc, m, ni, 0);
2916 	}
2917 	sc->sc_tx_timer = 5;
2918 	IWM_UNLOCK(sc);
2919 
2920         return (error);
2921 }
2922 
2923 /*
2924  * mvm/tx.c
2925  */
2926 
2927 #if 0
2928 /*
2929  * Note that there are transports that buffer frames before they reach
2930  * the firmware. This means that after flush_tx_path is called, the
2931  * queue might not be empty. The race-free way to handle this is to:
2932  * 1) set the station as draining
2933  * 2) flush the Tx path
2934  * 3) wait for the transport queues to be empty
2935  */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	/* Flush command covering the TX queues selected by tfd_msk. */
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	/* 'sync' selects whether we wait for the firmware to complete. */
	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
                device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
2953 #endif
2954 
2955 /*
2956  * BEGIN mvm/sta.c
2957  */
2958 
/*
 * Down-convert a v6 ADD_STA command into the older v5 layout, for
 * firmware that lacks IWM_UCODE_TLV_FLAGS_STA_KEY_CMD.  Every field
 * present in both versions is copied verbatim; anything v6-only is
 * dropped by the initial memset.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
2982 
2983 static int
2984 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
2985 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
2986 {
2987 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
2988 
2989 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
2990 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
2991 		    sizeof(*cmd), cmd, status);
2992 	}
2993 
2994 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
2995 
2996 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
2997 	    &cmd_v5, status);
2998 }
2999 
3000 /* send station add/update command to firmware */
3001 static int
3002 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3003 {
3004 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3005 	int ret;
3006 	uint32_t status;
3007 
3008 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3009 
3010 	add_sta_cmd.sta_id = IWM_STATION_ID;
3011 	add_sta_cmd.mac_id_n_color
3012 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3013 	        IWM_DEFAULT_COLOR));
3014 	if (!update) {
3015 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
3016 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3017 	}
3018 	add_sta_cmd.add_modify = update ? 1 : 0;
3019 	add_sta_cmd.station_flags_msk
3020 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3021 
3022 	status = IWM_ADD_STA_SUCCESS;
3023 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3024 	if (ret)
3025 		return ret;
3026 
3027 	switch (status) {
3028 	case IWM_ADD_STA_SUCCESS:
3029 		break;
3030 	default:
3031 		ret = EIO;
3032 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3033 		break;
3034 	}
3035 
3036 	return ret;
3037 }
3038 
/* Add a new (not yet known) station to the firmware. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3044 
/* Modify an already-added station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3050 
3051 static int
3052 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3053 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3054 {
3055 	struct iwm_mvm_add_sta_cmd_v6 cmd;
3056 	int ret;
3057 	uint32_t status;
3058 
3059 	memset(&cmd, 0, sizeof(cmd));
3060 	cmd.sta_id = sta->sta_id;
3061 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3062 
3063 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3064 
3065 	if (addr)
3066 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3067 
3068 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3069 	if (ret)
3070 		return ret;
3071 
3072 	switch (status) {
3073 	case IWM_ADD_STA_SUCCESS:
3074 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3075 		    "%s: Internal station added.\n", __func__);
3076 		return 0;
3077 	default:
3078 		device_printf(sc->sc_dev,
3079 		    "%s: Add internal station failed, status=0x%x\n",
3080 		    __func__, status);
3081 		ret = EIO;
3082 		break;
3083 	}
3084 	return ret;
3085 }
3086 
/*
 * Add the auxiliary station used for scanning (see iwm_init_hw()).
 * It owns no TX queues and has no MAC address.  On failure, any
 * partial aux-station state in the softc is wiped.
 */
static int
iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
	int ret;

	/* sta_id 3 is the slot used for the aux station here. */
	sc->sc_aux_sta.sta_id = 3;
	sc->sc_aux_sta.tfd_queue_msk = 0;

	ret = iwm_mvm_add_int_sta_common(sc,
	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);

	if (ret)
		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
	return ret;
}
3102 
3103 /*
3104  * END mvm/sta.c
3105  */
3106 
3107 /*
3108  * BEGIN mvm/quota.c
3109  */
3110 
3111 static int
3112 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3113 {
3114 	struct iwm_time_quota_cmd cmd;
3115 	int i, idx, ret, num_active_macs, quota, quota_rem;
3116 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3117 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3118 	uint16_t id;
3119 
3120 	memset(&cmd, 0, sizeof(cmd));
3121 
3122 	/* currently, PHY ID == binding ID */
3123 	if (in) {
3124 		id = in->in_phyctxt->id;
3125 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3126 		colors[id] = in->in_phyctxt->color;
3127 
3128 		if (1)
3129 			n_ifs[id] = 1;
3130 	}
3131 
3132 	/*
3133 	 * The FW's scheduling session consists of
3134 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3135 	 * equally between all the bindings that require quota
3136 	 */
3137 	num_active_macs = 0;
3138 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3139 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3140 		num_active_macs += n_ifs[i];
3141 	}
3142 
3143 	quota = 0;
3144 	quota_rem = 0;
3145 	if (num_active_macs) {
3146 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3147 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3148 	}
3149 
3150 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3151 		if (colors[i] < 0)
3152 			continue;
3153 
3154 		cmd.quotas[idx].id_and_color =
3155 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3156 
3157 		if (n_ifs[i] <= 0) {
3158 			cmd.quotas[idx].quota = htole32(0);
3159 			cmd.quotas[idx].max_duration = htole32(0);
3160 		} else {
3161 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3162 			cmd.quotas[idx].max_duration = htole32(0);
3163 		}
3164 		idx++;
3165 	}
3166 
3167 	/* Give the remainder of the session to the first binding */
3168 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3169 
3170 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3171 	    sizeof(cmd), &cmd);
3172 	if (ret)
3173 		device_printf(sc->sc_dev,
3174 		    "%s: Failed to send quota: %d\n", __func__, ret);
3175 	return ret;
3176 }
3177 
3178 /*
3179  * END mvm/quota.c
3180  */
3181 
3182 /*
3183  * ieee80211 routines
3184  */
3185 
3186 /*
3187  * Change to AUTH state in 80211 state machine.  Roughly matches what
3188  * Linux does in bss_info_changed().
3189  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.  Hold a reference for the
	 * duration of this function; released at 'out'.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	in->in_assoc = 0;

	/* Allow multicast from the BSS we are authenticating against. */
	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		/* Contexts already exist in firmware: update in place. */
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed update phy ctxt\n", __func__);
			goto out;
		}
		in->in_phyctxt = &sc->sc_phyctxt[0];

		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: binding update cmd\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update sta\n", __func__);
			goto out;
		}
	} else {
		/* First time through: create MAC/PHY contexts and station. */
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed add phy ctxt!\n", __func__);
			error = ETIMEDOUT;
			goto out;
		}
		in->in_phyctxt = &sc->sc_phyctxt[0];

		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: binding add cmd\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add sta\n", __func__);
			goto out;
		}
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	ieee80211_free_node(ni);
	return (error);
}
3300 
3301 static int
3302 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3303 {
3304 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
3305 	int error;
3306 
3307 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3308 		device_printf(sc->sc_dev,
3309 		    "%s: failed to update STA\n", __func__);
3310 		return error;
3311 	}
3312 
3313 	in->in_assoc = 1;
3314 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3315 		device_printf(sc->sc_dev,
3316 		    "%s: failed to update MAC\n", __func__);
3317 		return error;
3318 	}
3319 
3320 	return 0;
3321 }
3322 
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* The "proper" teardown path, kept for reference; see above. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): rm_sta is invoked twice in this dead path. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3380 
3381 static struct ieee80211_node *
3382 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3383 {
3384 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
3385 	    M_NOWAIT | M_ZERO);
3386 }
3387 
/*
 * Build the firmware link-quality (rate selection) command for a node
 * from its negotiated legacy rate set.  Populates in->in_ridx (the
 * 802.11-rate -> hardware-rate-index map, highest rate first) and
 * in->in_lq; the caller sends in_lq via IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Sanity: the LQ table has a fixed number of slots. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No HW entry: in_ridx[i] stays -1 from the memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Round-robin through the valid TX antennas. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3499 
/*
 * ifmedia change callback: let net80211 process the change, and if it
 * requires a restart (ENETRESET) bounce the hardware while interfaces
 * are running.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct ieee80211vap *vap = ifp->if_softc;
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	IWM_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		iwm_stop(sc);
		iwm_init(sc);
	}
	IWM_UNLOCK(sc);
	/* NOTE(review): ENETRESET is propagated to the caller here. */
	return error;
}
3520 
3521 
/*
 * net80211 state-machine hook.  Runs the driver-side work for each
 * transition under the IWM lock (the 802.11 lock is dropped for the
 * duration), then chains to the saved net80211 newstate handler.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Scan LED blinking only while in SCAN state. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Lock order: reacquire the 802.11 lock for newstate. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quotas, rates — then push LQ. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
3644 
/*
 * Taskqueue callback run when a scan pass completes.  If only the
 * 2 GHz band was scanned and the NVM says 5 GHz is available, kick
 * off a 5 GHz pass; otherwise report scan completion to net80211.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	IWM_LOCK(sc);
	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
		done = 0;
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
			device_printf(sc->sc_dev,
			    "could not initiate 5 GHz scan\n");
			done = 1;
		}
	} else {
		done = 1;
	}

	if (done) {
		/* Drop the lock around the net80211 upcall. */
		IWM_UNLOCK(sc);
		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
		IWM_LOCK(sc);
		sc->sc_scanband = 0;
	}
	IWM_UNLOCK(sc);
}
3679 
/*
 * Full firmware bring-up: run the INIT ucode image, restart the
 * hardware, load the regular image, then configure antennas, PHY
 * calibration data, the aux (scan) station, PHY contexts, power, and
 * the TX queues.  On any failure past firmware load the device is
 * stopped again.  The ordering of these steps matters.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
3750 
3751 /* Allow multicast from our BSSID. */
3752 static int
3753 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3754 {
3755 	struct ieee80211_node *ni = vap->iv_bss;
3756 	struct iwm_mcast_filter_cmd *cmd;
3757 	size_t size;
3758 	int error;
3759 
3760 	size = roundup(sizeof(*cmd), 4);
3761 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
3762 	if (cmd == NULL)
3763 		return ENOMEM;
3764 	cmd->filter_own = 1;
3765 	cmd->port_id = 0;
3766 	cmd->count = 0;
3767 	cmd->pass_all = 1;
3768 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3769 
3770 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3771 	    IWM_CMD_SYNC, size, cmd);
3772 	free(cmd, M_DEVBUF);
3773 
3774 	return (error);
3775 }
3776 
3777 static void
3778 iwm_init(struct iwm_softc *sc)
3779 {
3780 	int error;
3781 
3782 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3783 		return;
3784 	}
3785 	sc->sc_generation++;
3786 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
3787 
3788 	if ((error = iwm_init_hw(sc)) != 0) {
3789 		iwm_stop(sc);
3790 		return;
3791 	}
3792 
3793 	/*
3794  	 * Ok, firmware loaded and we are jogging
3795 	 */
3796 	sc->sc_flags |= IWM_FLAG_HW_INITED;
3797 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3798 }
3799 
3800 static int
3801 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3802 {
3803 	struct iwm_softc *sc;
3804 	int error;
3805 
3806 	sc = ic->ic_softc;
3807 
3808 	IWM_LOCK(sc);
3809 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3810 		IWM_UNLOCK(sc);
3811 		return (ENXIO);
3812 	}
3813 	error = mbufq_enqueue(&sc->sc_snd, m);
3814 	if (error) {
3815 		IWM_UNLOCK(sc);
3816 		return (error);
3817 	}
3818 	iwm_start(sc);
3819 	IWM_UNLOCK(sc);
3820 	return (0);
3821 }
3822 
3823 /*
3824  * Dequeue packets from sendq and call send.
3825  */
3826 static void
3827 iwm_start(struct iwm_softc *sc)
3828 {
3829 	struct ieee80211_node *ni;
3830 	struct mbuf *m;
3831 	int ac = 0;
3832 
3833 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3834 	while (sc->qfullmsk == 0 &&
3835 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3836 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3837 		if (iwm_tx(sc, m, ni, ac) != 0) {
3838 			if_inc_counter(ni->ni_vap->iv_ifp,
3839 			    IFCOUNTER_OERRORS, 1);
3840 			ieee80211_free_node(ni);
3841 			continue;
3842 		}
3843 		sc->sc_tx_timer = 15;
3844 	}
3845 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
3846 }
3847 
3848 static void
3849 iwm_stop(struct iwm_softc *sc)
3850 {
3851 
3852 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
3853 	sc->sc_flags |= IWM_FLAG_STOPPED;
3854 	sc->sc_generation++;
3855 	sc->sc_scanband = 0;
3856 	iwm_led_blink_stop(sc);
3857 	sc->sc_tx_timer = 0;
3858 	iwm_stop_device(sc);
3859 }
3860 
3861 static void
3862 iwm_watchdog(void *arg)
3863 {
3864 	struct iwm_softc *sc = arg;
3865 	struct ieee80211com *ic = &sc->sc_ic;
3866 
3867 	if (sc->sc_tx_timer > 0) {
3868 		if (--sc->sc_tx_timer == 0) {
3869 			device_printf(sc->sc_dev, "device timeout\n");
3870 #ifdef IWM_DEBUG
3871 			iwm_nic_error(sc);
3872 #endif
3873 			ieee80211_restart_all(ic);
3874 			counter_u64_add(ic->ic_oerrors, 1);
3875 			return;
3876 		}
3877 	}
3878 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3879 }
3880 
3881 static void
3882 iwm_parent(struct ieee80211com *ic)
3883 {
3884 	struct iwm_softc *sc = ic->ic_softc;
3885 	int startall = 0;
3886 
3887 	IWM_LOCK(sc);
3888 	if (ic->ic_nrunning > 0) {
3889 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
3890 			iwm_init(sc);
3891 			startall = 1;
3892 		}
3893 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
3894 		iwm_stop(sc);
3895 	IWM_UNLOCK(sc);
3896 	if (startall)
3897 		ieee80211_start_all(ic);
3898 }
3899 
3900 /*
3901  * The interrupt side of things
3902  */
3903 
3904 /*
3905  * error dumping routines are from iwlwifi/mvm/utils.c
3906  */
3907 
3908 /*
3909  * Note: This structure is read from the device with IO accesses,
3910  * and the reading already does the endian conversion. As it is
3911  * read with uint32_t-sized accesses, any members with a different size
3912  * need to be ordered correctly though!
3913  */
struct iwm_error_event_table {
	/* All fields are filled in by the firmware; see the note above
	 * about uint32_t-sized reads and member ordering. */
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
3960 
/* Byte offset of the first log entry, past the leading 'valid' count word. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
/* Size, in bytes, of a single entry in the firmware error log. */
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
3963 
3964 #ifdef IWM_DEBUG
/*
 * Mapping of firmware error ids (iwm_error_event_table.error_id) to
 * human-readable names.  The final "ADVANCED_SYSASSERT" entry is the
 * catch-all used by iwm_desc_lookup() when no id matches.
 *
 * The table is read-only and used only by iwm_desc_lookup() in this
 * file, so give it internal linkage and const-qualify it.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
3986 
3987 static const char *
3988 iwm_desc_lookup(uint32_t num)
3989 {
3990 	int i;
3991 
3992 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
3993 		if (advanced_lookup[i].num == num)
3994 			return advanced_lookup[i].name;
3995 
3996 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
3997 	return advanced_lookup[i].name;
3998 }
3999 
4000 /*
4001  * Support for dumping the error log seemed like a good idea ...
4002  * but it's mostly hex junk and the only sensible thing is the
4003  * hw/ucode revision (which we know anyway).  Since it's here,
4004  * I'll just leave it in, just in case e.g. the Intel guys want to
4005  * help us decipher some "ADVANCED_SYSASSERT" later.
4006  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	/* Table address was reported by the firmware in its ALIVE response. */
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check that the pointer falls inside device SRAM. */
	if (base < 0x800000 || base >= 0x80C000) {
		device_printf(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * NOTE(review): once table.valid != 0 this condition is always
	 * true (4 <= valid * 28); the same check appears in the Linux
	 * iwlwifi driver this code is based on.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* Dump the raw table contents; mostly useful to Intel engineers. */
	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
	device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4072 #endif
4073 
/*
 * Sync the current RX descriptor's DMA map for CPU reads and point
 * _var_ at the payload immediately following the iwm_rx_packet header.
 * Relies on 'ring' and 'data' being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
4079 
/*
 * As SYNC_RESP_STRUCT, but intended for variable-length payloads.
 * NOTE(review): the _len_ argument is currently unused — the whole
 * map is synced regardless of the stated length.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
4085 
4086 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4087 
4088 /*
4089  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4090  * Basic structure from if_iwn
4091  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Make the firmware-written ring status visible to the CPU. */
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* 'hw' is the index up to which the firmware has filled the ring. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid marks firmware-originated notifications. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Dispatch on the firmware notification/response opcode. */
		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MVM_ALIVE: {
			/*
			 * Firmware boot acknowledgement; record the SRAM
			 * pointers it reports and wake the thread waiting
			 * in the firmware-load path.
			 */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/*
			 * Copy the response back for the thread blocked in
			 * the synchronous command path, if it asked for it.
			 * NOTE(review): copies sizeof(sc->sc_cmd_resp)
			 * bytes starting at pkt — assumes the RX buffer is
			 * at least that large; confirm against rbuf size.
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			/* Simple status responses to driver commands. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			/* Wake the thread waiting in iwm_run_init_mvm_ucode(). */
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Defer end-of-scan processing to the taskqueue. */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			        notif->status, notif->action);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4321 
/*
 * Main interrupt handler.  Collects the pending interrupt causes
 * (either from the ICT table or directly from IWM_CSR_INT), then
 * dispatches: firmware errors, hardware errors, rfkill, firmware
 * chunk completion, and RX notifications.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask further interrupts while we process this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the ICT slot and move on. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Spread the packed ICT bits back into CSR_INT layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake the thread sleeping in the firmware-load path. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX ring of firmware notifications. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	/* Re-enable the interrupts we masked on entry. */
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
4475 }
4476 
4477 /*
4478  * Autoconf glue-sniffing
4479  */
/* PCI vendor/device IDs for the adapters this driver supports. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b

/* Probe table: PCI device ID -> human-readable description. */
static const struct iwm_devices {
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* description set on the device */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
4499 
4500 static int
4501 iwm_probe(device_t dev)
4502 {
4503 	int i;
4504 
4505 	for (i = 0; i < nitems(iwm_devices); i++)
4506 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4507 		    pci_get_device(dev) == iwm_devices[i].device) {
4508 			device_set_desc(dev, iwm_devices[i].name);
4509 			return (BUS_PROBE_DEFAULT);
4510 		}
4511 
4512 	return (ENXIO);
4513 }
4514 
4515 static int
4516 iwm_dev_check(device_t dev)
4517 {
4518 	struct iwm_softc *sc;
4519 
4520 	sc = device_get_softc(dev);
4521 
4522 	switch (pci_get_device(dev)) {
4523 	case PCI_PRODUCT_INTEL_WL_3160_1:
4524 	case PCI_PRODUCT_INTEL_WL_3160_2:
4525 		sc->sc_fwname = "iwm3160fw";
4526 		sc->host_interrupt_operation_mode = 1;
4527 		return (0);
4528 	case PCI_PRODUCT_INTEL_WL_7260_1:
4529 	case PCI_PRODUCT_INTEL_WL_7260_2:
4530 		sc->sc_fwname = "iwm7260fw";
4531 		sc->host_interrupt_operation_mode = 1;
4532 		return (0);
4533 	case PCI_PRODUCT_INTEL_WL_7265_1:
4534 	case PCI_PRODUCT_INTEL_WL_7265_2:
4535 		sc->sc_fwname = "iwm7265fw";
4536 		sc->host_interrupt_operation_mode = 0;
4537 		return (0);
4538 	default:
4539 		device_printf(dev, "unknown adapter type\n");
4540 		return ENXIO;
4541 	}
4542 }
4543 
4544 static int
4545 iwm_pci_attach(device_t dev)
4546 {
4547 	struct iwm_softc *sc;
4548 	int count, error, rid;
4549 	uint16_t reg;
4550 
4551 	sc = device_get_softc(dev);
4552 
4553 	/* Clear device-specific "PCI retry timeout" register (41h). */
4554 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4555 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4556 
4557 	/* Enable bus-mastering and hardware bug workaround. */
4558 	pci_enable_busmaster(dev);
4559 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4560 	/* if !MSI */
4561 	if (reg & PCIM_STATUS_INTxSTATE) {
4562 		reg &= ~PCIM_STATUS_INTxSTATE;
4563 	}
4564 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4565 
4566 	rid = PCIR_BAR(0);
4567 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4568 	    RF_ACTIVE);
4569 	if (sc->sc_mem == NULL) {
4570 		device_printf(sc->sc_dev, "can't map mem space\n");
4571 		return (ENXIO);
4572 	}
4573 	sc->sc_st = rman_get_bustag(sc->sc_mem);
4574 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4575 
4576 	/* Install interrupt handler. */
4577 	count = 1;
4578 	rid = 0;
4579 	if (pci_alloc_msi(dev, &count) == 0)
4580 		rid = 1;
4581 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4582 	    (rid != 0 ? 0 : RF_SHAREABLE));
4583 	if (sc->sc_irq == NULL) {
4584 		device_printf(dev, "can't map interrupt\n");
4585 			return (ENXIO);
4586 	}
4587 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4588 	    NULL, iwm_intr, sc, &sc->sc_ih);
4589 	if (sc->sc_ih == NULL) {
4590 		device_printf(dev, "can't establish interrupt");
4591 			return (ENXIO);
4592 	}
4593 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
4594 
4595 	return (0);
4596 }
4597 
4598 static void
4599 iwm_pci_detach(device_t dev)
4600 {
4601 	struct iwm_softc *sc = device_get_softc(dev);
4602 
4603 	if (sc->sc_irq != NULL) {
4604 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4605 		bus_release_resource(dev, SYS_RES_IRQ,
4606 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
4607 		pci_release_msi(dev);
4608         }
4609 	if (sc->sc_mem != NULL)
4610 		bus_release_resource(dev, SYS_RES_MEMORY,
4611 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
4612 }
4613 
4614 
4615 
/*
 * Device attach: set up locks, task queue, PCI resources, DMA memory
 * (firmware, keep-warm page, ICT, scheduler, TX/RX rings), and basic
 * net80211 state.  The remainder of initialization (firmware load,
 * NVM read, ifattach) is deferred to iwm_preinit() via an intrhook.
 * Any failure unwinds everything through iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
        }

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* -1 means "no synchronous command response pending". */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Initialize the PHY contexts as unused. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Defer firmware-dependent setup until interrupts are live. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
4753 
4754 static int
4755 iwm_update_edca(struct ieee80211com *ic)
4756 {
4757 	struct iwm_softc *sc = ic->ic_softc;
4758 
4759 	device_printf(sc->sc_dev, "%s: called\n", __func__);
4760 	return (0);
4761 }
4762 
/*
 * Deferred attach stage, run from a config intrhook once interrupts
 * work: start the hardware, run the "init" firmware once to read the
 * NVM and calibration data, then attach to net80211.  On failure the
 * whole device is torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Boot the init firmware once to obtain NVM contents, then stop. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
4835 
4836 /*
4837  * Attach the interface to 802.11 radiotap.
4838  */
4839 static void
4840 iwm_radiotap_attach(struct iwm_softc *sc)
4841 {
4842         struct ieee80211com *ic = &sc->sc_ic;
4843 
4844 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4845 	    "->%s begin\n", __func__);
4846         ieee80211_radiotap_attach(ic,
4847             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
4848                 IWM_TX_RADIOTAP_PRESENT,
4849             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
4850                 IWM_RX_RADIOTAP_PRESENT);
4851 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4852 	    "->%s end\n", __func__);
4853 }
4854 
4855 static struct ieee80211vap *
4856 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
4857     enum ieee80211_opmode opmode, int flags,
4858     const uint8_t bssid[IEEE80211_ADDR_LEN],
4859     const uint8_t mac[IEEE80211_ADDR_LEN])
4860 {
4861 	struct iwm_vap *ivp;
4862 	struct ieee80211vap *vap;
4863 
4864 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
4865 		return NULL;
4866 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
4867 	vap = &ivp->iv_vap;
4868 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
4869 	vap->iv_bmissthreshold = 10;            /* override default */
4870 	/* Override with driver methods. */
4871 	ivp->iv_newstate = vap->iv_newstate;
4872 	vap->iv_newstate = iwm_newstate;
4873 
4874 	ieee80211_ratectl_init(vap);
4875 	/* Complete setup. */
4876 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
4877 	    mac);
4878 	ic->ic_opmode = opmode;
4879 
4880 	return vap;
4881 }
4882 
4883 static void
4884 iwm_vap_delete(struct ieee80211vap *vap)
4885 {
4886 	struct iwm_vap *ivp = IWM_VAP(vap);
4887 
4888 	ieee80211_ratectl_deinit(vap);
4889 	ieee80211_vap_detach(vap);
4890 	free(ivp, M_80211_VAP);
4891 }
4892 
4893 static void
4894 iwm_scan_start(struct ieee80211com *ic)
4895 {
4896 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4897         struct iwm_softc *sc = ic->ic_softc;
4898 	int error;
4899 
4900 	if (sc->sc_scanband)
4901 		return;
4902 	IWM_LOCK(sc);
4903 	error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
4904 	if (error) {
4905 		device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
4906 		IWM_UNLOCK(sc);
4907 		ieee80211_cancel_scan(vap);
4908 		sc->sc_scanband = 0;
4909 	} else {
4910 		iwm_led_blink_start(sc);
4911 		IWM_UNLOCK(sc);
4912 	}
4913 }
4914 
4915 static void
4916 iwm_scan_end(struct ieee80211com *ic)
4917 {
4918 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4919 	struct iwm_softc *sc = ic->ic_softc;
4920 
4921 	IWM_LOCK(sc);
4922 	iwm_led_blink_stop(sc);
4923 	if (vap->iv_state == IEEE80211_S_RUN)
4924 		iwm_mvm_led_enable(sc);
4925 	IWM_UNLOCK(sc);
4926 }
4927 
/* Multicast filter updates are not implemented; the hardware receives all. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
4932 
/* Channel changes are driven by firmware scan/assoc commands; no-op here. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
4937 
/* Per-channel dwell is handled by the firmware scan engine; no-op here. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
4942 
/* Minimum-dwell handling is done by the firmware scan engine; no-op here. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* Redundant bare "return;" removed for consistency with the
	 * other empty net80211 callback stubs above. */
}
4948 
/*
 * Restart the interface: stop the hardware and, if net80211 still has the
 * interface marked running, bring it back up.  Runs from task context and
 * from iwm_resume().
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/*
	 * Serialize init/stop sequences: sleep until nobody else holds
	 * IWM_FLAG_BUSY, then claim it for the duration of the restart.
	 * The matching wakeup() below releases any other waiters.
	 */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	/* Re-init only if net80211 still wants the interface up. */
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
4965 
4966 static int
4967 iwm_resume(device_t dev)
4968 {
4969 	struct iwm_softc *sc = device_get_softc(dev);
4970 	int do_reinit = 0;
4971 	uint16_t reg;
4972 
4973 	/* Clear device-specific "PCI retry timeout" register (41h). */
4974 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4975 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4976 	iwm_init_task(device_get_softc(dev));
4977 
4978 	IWM_LOCK(sc);
4979 	if (sc->sc_flags & IWM_FLAG_DORESUME) {
4980 		sc->sc_flags &= ~IWM_FLAG_DORESUME;
4981 		do_reinit = 1;
4982 	}
4983 	IWM_UNLOCK(sc);
4984 
4985 	if (do_reinit)
4986 		ieee80211_resume_all(&sc->sc_ic);
4987 
4988 	return 0;
4989 }
4990 
4991 static int
4992 iwm_suspend(device_t dev)
4993 {
4994 	int do_stop = 0;
4995 	struct iwm_softc *sc = device_get_softc(dev);
4996 
4997 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
4998 
4999 	ieee80211_suspend_all(&sc->sc_ic);
5000 
5001 	if (do_stop) {
5002 		IWM_LOCK(sc);
5003 		iwm_stop(sc);
5004 		sc->sc_flags |= IWM_FLAG_DORESUME;
5005 		IWM_UNLOCK(sc);
5006 	}
5007 
5008 	return (0);
5009 }
5010 
/*
 * Common teardown path shared by detach and attach-failure cleanup.
 * do_net80211 selects whether ieee80211_ifdetach() is called; it is 0
 * when net80211 attach never completed.  Order matters throughout:
 * tasks and callouts are quiesced first, then the hardware is stopped,
 * then memory/DMA resources are released, and the lock is destroyed last.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Quiesce and release the taskqueue before touching hardware. */
	if (sc->sc_tq) {
		taskqueue_drain_all(sc->sc_tq);
		taskqueue_free(sc->sc_tq);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211)
		ieee80211_ifdetach(&sc->sc_ic);

	iwm_phy_db_free(sc);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_free_sched(sc);
	/* DMA areas are freed only if they were successfully allocated. */
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	/* Drop any pending transmit mbufs before the mutex goes away. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
5056 
5057 static int
5058 iwm_detach(device_t dev)
5059 {
5060 	struct iwm_softc *sc = device_get_softc(dev);
5061 
5062 	return (iwm_detach_local(sc, 1));
5063 }
5064 
5065 static device_method_t iwm_pci_methods[] = {
5066         /* Device interface */
5067         DEVMETHOD(device_probe,         iwm_probe),
5068         DEVMETHOD(device_attach,        iwm_attach),
5069         DEVMETHOD(device_detach,        iwm_detach),
5070         DEVMETHOD(device_suspend,       iwm_suspend),
5071         DEVMETHOD(device_resume,        iwm_resume),
5072 
5073         DEVMETHOD_END
5074 };
5075 
5076 static driver_t iwm_pci_driver = {
5077         "iwm",
5078         iwm_pci_methods,
5079         sizeof (struct iwm_softc)
5080 };
5081 
static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
5088