xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 0183e0151669735d62584fbba9125ed90716af5e)
1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107 
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110 
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127 
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131 
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134 
135 #include <net/bpf.h>
136 
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143 
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148 
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153 
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 
168 #include <dev/iwm/if_iwm_pcie_trans.h>
169 #include <dev/iwm/if_iwm_led.h>
170 #include <dev/iwm/if_iwm_fw.h>
171 
172 const uint8_t iwm_nvm_channels[] = {
173 	/* 2.4 GHz */
174 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
175 	/* 5 GHz */
176 	36, 40, 44, 48, 52, 56, 60, 64,
177 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
178 	149, 153, 157, 161, 165
179 };
180 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
181     "IWM_NUM_CHANNELS is too small");
182 
183 const uint8_t iwm_nvm_channels_8000[] = {
184 	/* 2.4 GHz */
185 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
186 	/* 5 GHz */
187 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
188 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
189 	149, 153, 157, 161, 165, 169, 173, 177, 181
190 };
191 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
192     "IWM_NUM_CHANNELS_8000 is too small");
193 
194 #define IWM_NUM_2GHZ_CHANNELS	14
195 #define IWM_N_HW_ADDR_MASK	0xF
196 
197 /*
198  * XXX For now, there's simply a fixed set of rate table entries
199  * that are populated.
200  */
201 const struct iwm_rate {
202 	uint8_t rate;
203 	uint8_t plcp;
204 } iwm_rates[] = {
205 	{   2,	IWM_RATE_1M_PLCP  },
206 	{   4,	IWM_RATE_2M_PLCP  },
207 	{  11,	IWM_RATE_5M_PLCP  },
208 	{  22,	IWM_RATE_11M_PLCP },
209 	{  12,	IWM_RATE_6M_PLCP  },
210 	{  18,	IWM_RATE_9M_PLCP  },
211 	{  24,	IWM_RATE_12M_PLCP },
212 	{  36,	IWM_RATE_18M_PLCP },
213 	{  48,	IWM_RATE_24M_PLCP },
214 	{  72,	IWM_RATE_36M_PLCP },
215 	{  96,	IWM_RATE_48M_PLCP },
216 	{ 108,	IWM_RATE_54M_PLCP },
217 };
218 #define IWM_RIDX_CCK	0
219 #define IWM_RIDX_OFDM	4
220 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
221 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
222 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
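
/*
 * Note: the "rate" field above is in units of 500 kbit/s, following the
 * usual net80211 convention, so e.g. { 108, IWM_RATE_54M_PLCP } is the
 * 54 Mbit/s OFDM rate.  Entries 0-3 are the 11b CCK rates and entries
 * 4-11 the 11a/g OFDM rates, which is exactly the split the
 * IWM_RIDX_IS_CCK()/IWM_RIDX_IS_OFDM() macros encode.
 */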
223 
224 struct iwm_nvm_section {
225 	uint16_t length;
226 	uint8_t *data;
227 };
228 
229 #define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
230 #define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
231 
232 struct iwm_mvm_alive_data {
233 	int valid;
234 	uint32_t scd_base_addr;
235 };
236 
237 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
238 static int	iwm_firmware_store_section(struct iwm_softc *,
239                                            enum iwm_ucode_type,
240                                            const uint8_t *, size_t);
241 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
242 static void	iwm_fw_info_free(struct iwm_fw_info *);
243 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
244 static int	iwm_alloc_fwmem(struct iwm_softc *);
245 static int	iwm_alloc_sched(struct iwm_softc *);
246 static int	iwm_alloc_kw(struct iwm_softc *);
247 static int	iwm_alloc_ict(struct iwm_softc *);
248 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
249 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
250 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
251 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
252                                   int);
253 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
254 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
255 static void	iwm_enable_interrupts(struct iwm_softc *);
256 static void	iwm_restore_interrupts(struct iwm_softc *);
257 static void	iwm_disable_interrupts(struct iwm_softc *);
258 static void	iwm_ict_reset(struct iwm_softc *);
259 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
260 static void	iwm_stop_device(struct iwm_softc *);
261 static void	iwm_mvm_nic_config(struct iwm_softc *);
262 static int	iwm_nic_rx_init(struct iwm_softc *);
263 static int	iwm_nic_tx_init(struct iwm_softc *);
264 static int	iwm_nic_init(struct iwm_softc *);
265 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
266 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
267 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
268                                    uint16_t, uint8_t *, uint16_t *);
269 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
270 				     uint16_t *, uint32_t);
271 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
272 static void	iwm_add_channel_band(struct iwm_softc *,
273 		    struct ieee80211_channel[], int, int *, int, size_t,
274 		    const uint8_t[]);
275 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
276 		    struct ieee80211_channel[]);
277 static struct iwm_nvm_data *
278 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
279 			   const uint16_t *, const uint16_t *,
280 			   const uint16_t *, const uint16_t *,
281 			   const uint16_t *);
282 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
283 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
284 					       struct iwm_nvm_data *,
285 					       const uint16_t *,
286 					       const uint16_t *);
287 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
288 			    const uint16_t *);
289 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
290 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
291 				  const uint16_t *);
292 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
293 				   const uint16_t *);
294 static void	iwm_set_radio_cfg(const struct iwm_softc *,
295 				  struct iwm_nvm_data *, uint32_t);
296 static struct iwm_nvm_data *
297 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
298 static int	iwm_nvm_init(struct iwm_softc *);
299 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
300 				      const struct iwm_fw_desc *);
301 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
302 					     bus_addr_t, uint32_t);
303 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
304 						const struct iwm_fw_sects *,
305 						int, int *);
306 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
307 					   const struct iwm_fw_sects *,
308 					   int, int *);
309 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
310 					       const struct iwm_fw_sects *);
311 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
312 					  const struct iwm_fw_sects *);
313 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
314 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
315 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
316 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
317                                               enum iwm_ucode_type);
318 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
319 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
320 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
321 					    struct iwm_rx_phy_info *);
322 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
323                                       struct iwm_rx_packet *,
324                                       struct iwm_rx_data *);
325 static int	iwm_get_noise(struct iwm_softc *sc,
326 		    const struct iwm_mvm_statistics_rx_non_phy *);
327 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *);
328 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
329                                          struct iwm_rx_packet *,
330 				         struct iwm_node *);
331 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
332                                   struct iwm_rx_data *);
333 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
334 #if 0
335 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
336                                  uint16_t);
337 #endif
338 static const struct iwm_rate *
339 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
340 			struct mbuf *, struct iwm_tx_cmd *);
341 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
342                        struct ieee80211_node *, int);
343 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
344 			     const struct ieee80211_bpf_params *);
345 static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
346 				      uint32_t tfd_msk, uint32_t flags);
347 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
348 					        struct iwm_mvm_add_sta_cmd *,
349                                                 int *);
350 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
351                                        int);
352 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
353 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
354 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
355                                            struct iwm_int_sta *,
356 				           const uint8_t *, uint16_t, uint16_t);
357 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
358 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
359 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
360 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
361 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
362 static struct ieee80211_node *
363 		iwm_node_alloc(struct ieee80211vap *,
364 		               const uint8_t[IEEE80211_ADDR_LEN]);
365 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
366 static int	iwm_media_change(struct ifnet *);
367 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
368 static void	iwm_endscan_cb(void *, int);
369 static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
370 					struct iwm_sf_cfg_cmd *,
371 					struct ieee80211_node *);
372 static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
373 static int	iwm_send_bt_init_conf(struct iwm_softc *);
374 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
375 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
376 static int	iwm_init_hw(struct iwm_softc *);
377 static void	iwm_init(struct iwm_softc *);
378 static void	iwm_start(struct iwm_softc *);
379 static void	iwm_stop(struct iwm_softc *);
380 static void	iwm_watchdog(void *);
381 static void	iwm_parent(struct ieee80211com *);
382 #ifdef IWM_DEBUG
383 static const char *
384 		iwm_desc_lookup(uint32_t);
385 static void	iwm_nic_error(struct iwm_softc *);
386 static void	iwm_nic_umac_error(struct iwm_softc *);
387 #endif
388 static void	iwm_notif_intr(struct iwm_softc *);
389 static void	iwm_intr(void *);
390 static int	iwm_attach(device_t);
391 static int	iwm_is_valid_ether_addr(uint8_t *);
392 static void	iwm_preinit(void *);
393 static int	iwm_detach_local(struct iwm_softc *sc, int);
394 static void	iwm_init_task(void *);
395 static void	iwm_radiotap_attach(struct iwm_softc *);
396 static struct ieee80211vap *
397 		iwm_vap_create(struct ieee80211com *,
398 		               const char [IFNAMSIZ], int,
399 		               enum ieee80211_opmode, int,
400 		               const uint8_t [IEEE80211_ADDR_LEN],
401 		               const uint8_t [IEEE80211_ADDR_LEN]);
402 static void	iwm_vap_delete(struct ieee80211vap *);
403 static void	iwm_scan_start(struct ieee80211com *);
404 static void	iwm_scan_end(struct ieee80211com *);
405 static void	iwm_update_mcast(struct ieee80211com *);
406 static void	iwm_set_channel(struct ieee80211com *);
407 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
408 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
409 static int	iwm_detach(device_t);
410 
411 /*
412  * Firmware parser.
413  */
414 
415 static int
416 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
417 {
418 	const struct iwm_fw_cscheme_list *l = (const void *)data;
419 
420 	if (dlen < sizeof(*l) ||
421 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
422 		return EINVAL;
423 
424 	/* we don't actually store anything for now, always use s/w crypto */
425 
426 	return 0;
427 }
428 
429 static int
430 iwm_firmware_store_section(struct iwm_softc *sc,
431     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
432 {
433 	struct iwm_fw_sects *fws;
434 	struct iwm_fw_desc *fwone;
435 
436 	if (type >= IWM_UCODE_TYPE_MAX)
437 		return EINVAL;
438 	if (dlen < sizeof(uint32_t))
439 		return EINVAL;
440 
441 	fws = &sc->sc_fw.fw_sects[type];
442 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
443 		return EINVAL;
444 
445 	fwone = &fws->fw_sect[fws->fw_count];
446 
447 	/* first 32bit are device load offset */
448 	memcpy(&fwone->offset, data, sizeof(uint32_t));
449 
450 	/* rest is data */
451 	fwone->data = data + sizeof(uint32_t);
452 	fwone->len = dlen - sizeof(uint32_t);
453 
454 	fws->fw_count++;
455 
456 	return 0;
457 }
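
/*
 * Layout of a firmware section TLV payload, as consumed above (a sketch
 * reconstructed from the parser, not an authoritative format spec):
 *
 *	offset 0: uint32_t device load offset
 *	offset 4: dlen - 4 bytes of section data
 *
 * Only a pointer and length are recorded here; the data is copied out to
 * the device later, in the iwm_pcie_load_*() paths.
 */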
458 
459 #define IWM_DEFAULT_SCAN_CHANNELS 40
460 
461 /* iwlwifi: iwl-drv.c */
462 struct iwm_tlv_calib_data {
463 	uint32_t ucode_type;
464 	struct iwm_tlv_calib_ctrl calib;
465 } __packed;
466 
467 static int
468 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
469 {
470 	const struct iwm_tlv_calib_data *def_calib = data;
471 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
472 
473 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
474 		device_printf(sc->sc_dev,
475 		    "Wrong ucode_type %u for default "
476 		    "calibration.\n", ucode_type);
477 		return EINVAL;
478 	}
479 
480 	sc->sc_default_calib[ucode_type].flow_trigger =
481 	    def_calib->calib.flow_trigger;
482 	sc->sc_default_calib[ucode_type].event_trigger =
483 	    def_calib->calib.event_trigger;
484 
485 	return 0;
486 }
487 
488 static int
489 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
490 			struct iwm_ucode_capabilities *capa)
491 {
492 	const struct iwm_ucode_api *ucode_api = (const void *)data;
493 	uint32_t api_index = le32toh(ucode_api->api_index);
494 	uint32_t api_flags = le32toh(ucode_api->api_flags);
495 	int i;
496 
497 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
498 		device_printf(sc->sc_dev,
499 		    "api flags index %d larger than supported by driver\n",
500 		    api_index);
501 		/* don't return an error so we can load FW that has more bits */
502 		return 0;
503 	}
504 
505 	for (i = 0; i < 32; i++) {
506 		if (api_flags & (1U << i))
507 			setbit(capa->enabled_api, i + 32 * api_index);
508 	}
509 
510 	return 0;
511 }
512 
513 static int
514 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
515 			   struct iwm_ucode_capabilities *capa)
516 {
517 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
518 	uint32_t api_index = le32toh(ucode_capa->api_index);
519 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
520 	int i;
521 
522 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
523 		device_printf(sc->sc_dev,
524 		    "capa flags index %d larger than supported by driver\n",
525 		    api_index);
526 		/* don't return an error so we can load FW that has more bits */
527 		return 0;
528 	}
529 
530 	for (i = 0; i < 32; i++) {
531 		if (api_flags & (1U << i))
532 			setbit(capa->enabled_capa, i + 32 * api_index);
533 	}
534 
535 	return 0;
536 }
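
/*
 * Both TLV parsers above treat enabled_api/enabled_capa as one flat
 * bitmap: each TLV carries a 32-bit word of flags plus the index of that
 * word, so a set bit i in word api_index lands at flat bit number
 * (32 * api_index + i).  For example, capability number 35 would arrive
 * as api_index == 1 with bit 3 set.
 */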
537 
538 static void
539 iwm_fw_info_free(struct iwm_fw_info *fw)
540 {
541 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
542 	fw->fw_fp = NULL;
543 	/* don't touch fw->fw_status */
544 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
545 }
546 
547 static int
548 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
549 {
550 	struct iwm_fw_info *fw = &sc->sc_fw;
551 	const struct iwm_tlv_ucode_header *uhdr;
552 	struct iwm_ucode_tlv tlv;
553 	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
554 	enum iwm_ucode_tlv_type tlv_type;
555 	const struct firmware *fwp;
556 	const uint8_t *data;
557 	uint32_t usniffer_img;
558 	uint32_t paging_mem_size;
559 	int num_of_cpus;
560 	int error = 0;
561 	size_t len;
562 
563 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
564 	    ucode_type != IWM_UCODE_INIT)
565 		return 0;
566 
567 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
568 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
569 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
570 
571 	if (fw->fw_fp != NULL)
572 		iwm_fw_info_free(fw);
573 
574 	/*
575 	 * Load firmware into driver memory.
576 	 * fw_fp will be set.
577 	 */
578 	IWM_UNLOCK(sc);
579 	fwp = firmware_get(sc->cfg->fw_name);
580 	IWM_LOCK(sc);
581 	if (fwp == NULL) {
582 		device_printf(sc->sc_dev,
583 		    "could not read firmware %s (error %d)\n",
584 		    sc->cfg->fw_name, error);
		error = EINVAL;
		goto out;
586 	}
587 	fw->fw_fp = fwp;
588 
589 	/* (Re-)Initialize default values. */
590 	capa->flags = 0;
591 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
592 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
593 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
594 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
595 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
596 
597 	/*
598 	 * Parse firmware contents
599 	 */
600 
601 	uhdr = (const void *)fw->fw_fp->data;
602 	if (*(const uint32_t *)fw->fw_fp->data != 0
603 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
604 		device_printf(sc->sc_dev, "invalid firmware %s\n",
605 		    sc->cfg->fw_name);
606 		error = EINVAL;
607 		goto out;
608 	}
609 
610 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
611 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
612 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
613 	    IWM_UCODE_API(le32toh(uhdr->ver)));
614 	data = uhdr->data;
615 	len = fw->fw_fp->datasize - sizeof(*uhdr);
616 
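	/*
	 * The rest of the image is a stream of TLV records: a struct
	 * iwm_ucode_tlv header (32-bit type and length) followed by
	 * "length" payload bytes, with each record padded out to a
	 * 4-byte boundary -- hence the roundup(tlv_len, 4) at the
	 * bottom of this loop.
	 */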
617 	while (len >= sizeof(tlv)) {
618 		size_t tlv_len;
619 		const void *tlv_data;
620 
621 		memcpy(&tlv, data, sizeof(tlv));
622 		tlv_len = le32toh(tlv.length);
623 		tlv_type = le32toh(tlv.type);
624 
625 		len -= sizeof(tlv);
626 		data += sizeof(tlv);
627 		tlv_data = data;
628 
629 		if (len < tlv_len) {
630 			device_printf(sc->sc_dev,
631 			    "firmware too short: %zu bytes\n",
632 			    len);
633 			error = EINVAL;
634 			goto parse_out;
635 		}
636 
637 		switch ((int)tlv_type) {
638 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
639 			if (tlv_len < sizeof(uint32_t)) {
640 				device_printf(sc->sc_dev,
641 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
642 				    __func__,
643 				    (int) tlv_len);
644 				error = EINVAL;
645 				goto parse_out;
646 			}
647 			capa->max_probe_length =
648 			    le32toh(*(const uint32_t *)tlv_data);
649 			/* limit it to something sensible */
650 			if (capa->max_probe_length >
651 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
652 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
653 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
654 				    "ridiculous\n", __func__);
655 				error = EINVAL;
656 				goto parse_out;
657 			}
658 			break;
659 		case IWM_UCODE_TLV_PAN:
660 			if (tlv_len) {
661 				device_printf(sc->sc_dev,
662 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
663 				    __func__,
664 				    (int) tlv_len);
665 				error = EINVAL;
666 				goto parse_out;
667 			}
668 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
669 			break;
670 		case IWM_UCODE_TLV_FLAGS:
671 			if (tlv_len < sizeof(uint32_t)) {
672 				device_printf(sc->sc_dev,
673 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
674 				    __func__,
675 				    (int) tlv_len);
676 				error = EINVAL;
677 				goto parse_out;
678 			}
679 			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
682 			 *
683 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
684 			 * Intentional or a bug?  Observations from
685 			 * current firmware file:
686 			 *  1) TLV_PAN is parsed first
687 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
688 			 * ==> this resets TLV_PAN to itself... hnnnk
689 			 */
690 			capa->flags = le32toh(*(const uint32_t *)tlv_data);
691 			break;
692 		case IWM_UCODE_TLV_CSCHEME:
693 			if ((error = iwm_store_cscheme(sc,
694 			    tlv_data, tlv_len)) != 0) {
695 				device_printf(sc->sc_dev,
696 				    "%s: iwm_store_cscheme(): returned %d\n",
697 				    __func__,
698 				    error);
699 				goto parse_out;
700 			}
701 			break;
702 		case IWM_UCODE_TLV_NUM_OF_CPU:
703 			if (tlv_len != sizeof(uint32_t)) {
704 				device_printf(sc->sc_dev,
705 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
706 				    __func__,
707 				    (int) tlv_len);
708 				error = EINVAL;
709 				goto parse_out;
710 			}
711 			num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
712 			if (num_of_cpus == 2) {
713 				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
714 					TRUE;
715 				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
716 					TRUE;
717 				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
718 					TRUE;
719 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
720 				device_printf(sc->sc_dev,
721 				    "%s: Driver supports only 1 or 2 CPUs\n",
722 				    __func__);
723 				error = EINVAL;
724 				goto parse_out;
725 			}
726 			break;
727 		case IWM_UCODE_TLV_SEC_RT:
728 			if ((error = iwm_firmware_store_section(sc,
729 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
730 				device_printf(sc->sc_dev,
731 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
732 				    __func__,
733 				    error);
734 				goto parse_out;
735 			}
736 			break;
737 		case IWM_UCODE_TLV_SEC_INIT:
738 			if ((error = iwm_firmware_store_section(sc,
739 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
740 				device_printf(sc->sc_dev,
741 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
742 				    __func__,
743 				    error);
744 				goto parse_out;
745 			}
746 			break;
747 		case IWM_UCODE_TLV_SEC_WOWLAN:
748 			if ((error = iwm_firmware_store_section(sc,
749 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
750 				device_printf(sc->sc_dev,
751 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
752 				    __func__,
753 				    error);
754 				goto parse_out;
755 			}
756 			break;
757 		case IWM_UCODE_TLV_DEF_CALIB:
758 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
759 				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
761 				    __func__,
762 				    (int) tlv_len,
763 				    (int) sizeof(struct iwm_tlv_calib_data));
764 				error = EINVAL;
765 				goto parse_out;
766 			}
767 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
768 				device_printf(sc->sc_dev,
769 				    "%s: iwm_set_default_calib() failed: %d\n",
770 				    __func__,
771 				    error);
772 				goto parse_out;
773 			}
774 			break;
775 		case IWM_UCODE_TLV_PHY_SKU:
776 			if (tlv_len != sizeof(uint32_t)) {
777 				error = EINVAL;
778 				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
780 				    __func__,
781 				    (int) tlv_len);
782 				goto parse_out;
783 			}
784 			sc->sc_fw.phy_config =
785 			    le32toh(*(const uint32_t *)tlv_data);
786 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
787 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
788 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
789 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
790 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
791 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
792 			break;
793 
794 		case IWM_UCODE_TLV_API_CHANGES_SET: {
795 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
796 				error = EINVAL;
797 				goto parse_out;
798 			}
799 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
800 				error = EINVAL;
801 				goto parse_out;
802 			}
803 			break;
804 		}
805 
806 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
807 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
808 				error = EINVAL;
809 				goto parse_out;
810 			}
811 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
812 				error = EINVAL;
813 				goto parse_out;
814 			}
815 			break;
816 		}
817 
818 		case 48: /* undocumented TLV */
819 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
820 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
821 			/* ignore, not used by current driver */
822 			break;
823 
824 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
825 			if ((error = iwm_firmware_store_section(sc,
826 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
827 			    tlv_len)) != 0)
828 				goto parse_out;
829 			break;
830 
831 		case IWM_UCODE_TLV_PAGING:
832 			if (tlv_len != sizeof(uint32_t)) {
833 				error = EINVAL;
834 				goto parse_out;
835 			}
836 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
837 
838 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
839 			    "%s: Paging: paging enabled (size = %u bytes)\n",
840 			    __func__, paging_mem_size);
841 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
842 				device_printf(sc->sc_dev,
843 					"%s: Paging: driver supports up to %u bytes for paging image\n",
844 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
845 				error = EINVAL;
846 				goto out;
847 			}
848 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
849 				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
851 				    __func__, IWM_FW_PAGING_SIZE);
852 				error = EINVAL;
853 				goto out;
854 			}
855 
856 			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
857 			    paging_mem_size;
858 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
859 			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
860 			    paging_mem_size;
861 			break;
862 
863 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
864 			if (tlv_len != sizeof(uint32_t)) {
865 				error = EINVAL;
866 				goto parse_out;
867 			}
868 			capa->n_scan_channels =
869 			    le32toh(*(const uint32_t *)tlv_data);
870 			break;
871 
872 		case IWM_UCODE_TLV_FW_VERSION:
873 			if (tlv_len != sizeof(uint32_t) * 3) {
874 				error = EINVAL;
875 				goto parse_out;
876 			}
877 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
878 			    "%d.%d.%d",
879 			    le32toh(((const uint32_t *)tlv_data)[0]),
880 			    le32toh(((const uint32_t *)tlv_data)[1]),
881 			    le32toh(((const uint32_t *)tlv_data)[2]));
882 			break;
883 
884 		case IWM_UCODE_TLV_FW_MEM_SEG:
885 			break;
886 
887 		default:
888 			device_printf(sc->sc_dev,
889 			    "%s: unknown firmware section %d, abort\n",
890 			    __func__, tlv_type);
891 			error = EINVAL;
892 			goto parse_out;
893 		}
894 
895 		len -= roundup(tlv_len, 4);
896 		data += roundup(tlv_len, 4);
897 	}
898 
899 	KASSERT(error == 0, ("unhandled error"));
900 
901  parse_out:
902 	if (error) {
903 		device_printf(sc->sc_dev, "firmware parse error %d, "
904 		    "section type %d\n", error, tlv_type);
905 	}
906 
907  out:
908 	if (error) {
909 		fw->fw_status = IWM_FW_STATUS_NONE;
910 		if (fw->fw_fp != NULL)
911 			iwm_fw_info_free(fw);
912 	} else
913 		fw->fw_status = IWM_FW_STATUS_DONE;
914 	wakeup(&sc->sc_fw);
915 
916 	return error;
917 }
918 
919 /*
920  * DMA resource routines
921  */
922 
923 /* fwmem is used to load firmware onto the card */
924 static int
925 iwm_alloc_fwmem(struct iwm_softc *sc)
926 {
927 	/* Must be aligned on a 16-byte boundary. */
928 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
929 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
930 }
931 
932 /* tx scheduler rings.  not used? */
933 static int
934 iwm_alloc_sched(struct iwm_softc *sc)
935 {
936 	/* TX scheduler rings must be aligned on a 1KB boundary. */
937 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
938 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
939 }
940 
941 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
942 static int
943 iwm_alloc_kw(struct iwm_softc *sc)
944 {
945 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
946 }
947 
948 /* interrupt cause table */
949 static int
950 iwm_alloc_ict(struct iwm_softc *sc)
951 {
952 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
953 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
954 }
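
/*
 * The ICT is a 4KB table of 32-bit interrupt cause words filled in by the
 * device.  The 1 << IWM_ICT_PADDR_SHIFT alignment requested above matches
 * the shift applied to ict_dma.paddr when the table address is programmed
 * into IWM_CSR_DRAM_INT_TBL_REG in iwm_ict_reset().
 */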
955 
956 static int
957 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
958 {
959 	bus_size_t size;
960 	int i, error;
961 
962 	ring->cur = 0;
963 
964 	/* Allocate RX descriptors (256-byte aligned). */
965 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
966 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
967 	if (error != 0) {
968 		device_printf(sc->sc_dev,
969 		    "could not allocate RX ring DMA memory\n");
970 		goto fail;
971 	}
972 	ring->desc = ring->desc_dma.vaddr;
973 
974 	/* Allocate RX status area (16-byte aligned). */
975 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
976 	    sizeof(*ring->stat), 16);
977 	if (error != 0) {
978 		device_printf(sc->sc_dev,
979 		    "could not allocate RX status DMA memory\n");
980 		goto fail;
981 	}
982 	ring->stat = ring->stat_dma.vaddr;
983 
	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}
994 
995 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
996 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
997 	if (error != 0) {
998 		device_printf(sc->sc_dev,
999 		    "%s: could not create RX buf DMA map, error %d\n",
1000 		    __func__, error);
1001 		goto fail;
1002 	}
1003 	/*
1004 	 * Allocate and map RX buffers.
1005 	 */
1006 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1007 		struct iwm_rx_data *data = &ring->data[i];
1008 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1009 		if (error != 0) {
1010 			device_printf(sc->sc_dev,
1011 			    "%s: could not create RX buf DMA map, error %d\n",
1012 			    __func__, error);
1013 			goto fail;
1014 		}
1015 		data->m = NULL;
1016 
1017 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1018 			goto fail;
1019 		}
1020 	}
1021 	return 0;
1022 
1023 fail:	iwm_free_rx_ring(sc, ring);
1024 	return error;
1025 }
1026 
1027 static void
1028 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1029 {
1030 	/* Reset the ring state */
1031 	ring->cur = 0;
1032 
1033 	/*
1034 	 * The hw rx ring index in shared memory must also be cleared,
1035 	 * otherwise the discrepancy can cause reprocessing chaos.
1036 	 */
1037 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1038 }
1039 
1040 static void
1041 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1042 {
1043 	int i;
1044 
1045 	iwm_dma_contig_free(&ring->desc_dma);
1046 	iwm_dma_contig_free(&ring->stat_dma);
1047 
1048 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1049 		struct iwm_rx_data *data = &ring->data[i];
1050 
1051 		if (data->m != NULL) {
1052 			bus_dmamap_sync(ring->data_dmat, data->map,
1053 			    BUS_DMASYNC_POSTREAD);
1054 			bus_dmamap_unload(ring->data_dmat, data->map);
1055 			m_freem(data->m);
1056 			data->m = NULL;
1057 		}
1058 		if (data->map != NULL) {
1059 			bus_dmamap_destroy(ring->data_dmat, data->map);
1060 			data->map = NULL;
1061 		}
1062 	}
1063 	if (ring->spare_map != NULL) {
1064 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1065 		ring->spare_map = NULL;
1066 	}
1067 	if (ring->data_dmat != NULL) {
1068 		bus_dma_tag_destroy(ring->data_dmat);
1069 		ring->data_dmat = NULL;
1070 	}
1071 }
1072 
1073 static int
1074 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1075 {
1076 	bus_addr_t paddr;
1077 	bus_size_t size;
1078 	size_t maxsize;
1079 	int nsegments;
1080 	int i, error;
1081 
1082 	ring->qid = qid;
1083 	ring->queued = 0;
1084 	ring->cur = 0;
1085 
1086 	/* Allocate TX descriptors (256-byte aligned). */
1087 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1088 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1089 	if (error != 0) {
1090 		device_printf(sc->sc_dev,
1091 		    "could not allocate TX ring DMA memory\n");
1092 		goto fail;
1093 	}
1094 	ring->desc = ring->desc_dma.vaddr;
1095 
1096 	/*
1097 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1098 	 * to allocate commands space for other rings.
1099 	 */
1100 	if (qid > IWM_MVM_CMD_QUEUE)
1101 		return 0;
1102 
1103 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1104 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1105 	if (error != 0) {
1106 		device_printf(sc->sc_dev,
1107 		    "could not allocate TX cmd DMA memory\n");
1108 		goto fail;
1109 	}
1110 	ring->cmd = ring->cmd_dma.vaddr;
1111 
1112 	/* FW commands may require more mapped space than packets. */
1113 	if (qid == IWM_MVM_CMD_QUEUE) {
1114 		maxsize = IWM_RBUF_SIZE;
1115 		nsegments = 1;
1116 	} else {
1117 		maxsize = MCLBYTES;
1118 		nsegments = IWM_MAX_SCATTER - 2;
1119 	}
1120 
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1124 	if (error != 0) {
1125 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1126 		goto fail;
1127 	}
1128 
1129 	paddr = ring->cmd_dma.paddr;
1130 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1131 		struct iwm_tx_data *data = &ring->data[i];
1132 
1133 		data->cmd_paddr = paddr;
1134 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1135 		    + offsetof(struct iwm_tx_cmd, scratch);
1136 		paddr += sizeof(struct iwm_device_cmd);
1137 
1138 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1139 		if (error != 0) {
1140 			device_printf(sc->sc_dev,
1141 			    "could not create TX buf DMA map\n");
1142 			goto fail;
1143 		}
1144 	}
1145 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1146 	    ("invalid physical address"));
1147 	return 0;
1148 
1149 fail:	iwm_free_tx_ring(sc, ring);
1150 	return error;
1151 }
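
/*
 * A sketch of the per-slot command addresses computed above: TX ring
 * entry i gets cmd_paddr pointing at element i of the ring's
 * iwm_device_cmd array, and scratch_paddr pointing at the scratch field
 * of the iwm_tx_cmd that sits just past the iwm_cmd_header within that
 * same element.
 */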
1152 
1153 static void
1154 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1155 {
1156 	int i;
1157 
1158 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1159 		struct iwm_tx_data *data = &ring->data[i];
1160 
1161 		if (data->m != NULL) {
1162 			bus_dmamap_sync(ring->data_dmat, data->map,
1163 			    BUS_DMASYNC_POSTWRITE);
1164 			bus_dmamap_unload(ring->data_dmat, data->map);
1165 			m_freem(data->m);
1166 			data->m = NULL;
1167 		}
1168 	}
1169 	/* Clear TX descriptors. */
1170 	memset(ring->desc, 0, ring->desc_dma.size);
1171 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1172 	    BUS_DMASYNC_PREWRITE);
1173 	sc->qfullmsk &= ~(1 << ring->qid);
1174 	ring->queued = 0;
1175 	ring->cur = 0;
1176 
1177 	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1178 		iwm_pcie_clear_cmd_in_flight(sc);
1179 }
1180 
1181 static void
1182 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1183 {
1184 	int i;
1185 
1186 	iwm_dma_contig_free(&ring->desc_dma);
1187 	iwm_dma_contig_free(&ring->cmd_dma);
1188 
1189 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1190 		struct iwm_tx_data *data = &ring->data[i];
1191 
1192 		if (data->m != NULL) {
1193 			bus_dmamap_sync(ring->data_dmat, data->map,
1194 			    BUS_DMASYNC_POSTWRITE);
1195 			bus_dmamap_unload(ring->data_dmat, data->map);
1196 			m_freem(data->m);
1197 			data->m = NULL;
1198 		}
1199 		if (data->map != NULL) {
1200 			bus_dmamap_destroy(ring->data_dmat, data->map);
1201 			data->map = NULL;
1202 		}
1203 	}
1204 	if (ring->data_dmat != NULL) {
1205 		bus_dma_tag_destroy(ring->data_dmat);
1206 		ring->data_dmat = NULL;
1207 	}
1208 }
1209 
1210 /*
1211  * High-level hardware frobbing routines
1212  */
1213 
1214 static void
1215 iwm_enable_interrupts(struct iwm_softc *sc)
1216 {
1217 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1218 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1219 }
1220 
1221 static void
1222 iwm_restore_interrupts(struct iwm_softc *sc)
1223 {
1224 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1225 }
1226 
1227 static void
1228 iwm_disable_interrupts(struct iwm_softc *sc)
1229 {
1230 	/* disable interrupts */
1231 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1232 
1233 	/* acknowledge all interrupts */
1234 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1235 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1236 }
1237 
1238 static void
1239 iwm_ict_reset(struct iwm_softc *sc)
1240 {
1241 	iwm_disable_interrupts(sc);
1242 
1243 	/* Reset ICT table. */
1244 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1245 	sc->ict_cur = 0;
1246 
1247 	/* Set physical address of ICT table (4KB aligned). */
1248 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1249 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1250 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1251 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1252 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1253 
1254 	/* Switch to ICT interrupt mode in driver. */
1255 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1256 
1257 	/* Re-enable interrupts. */
1258 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1259 	iwm_enable_interrupts(sc);
1260 }
1261 
1262 /* iwlwifi pcie/trans.c */
1263 
1264 /*
1265  * Since this .. hard-resets things, it's time to actually
1266  * mark the first vap (if any) as having no mac context.
1267  * It's annoying, but since the driver is potentially being
1268  * stop/start'ed whilst active (thanks openbsd port!) we
1269  * have to correctly track this.
1270  */
1271 static void
1272 iwm_stop_device(struct iwm_softc *sc)
1273 {
1274 	struct ieee80211com *ic = &sc->sc_ic;
1275 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1276 	int chnl, qid;
1277 	uint32_t mask = 0;
1278 
1279 	/* tell the device to stop sending interrupts */
1280 	iwm_disable_interrupts(sc);
1281 
1282 	/*
1283 	 * FreeBSD-local: mark the first vap as not-uploaded,
1284 	 * so the next transition through auth/assoc
1285 	 * will correctly populate the MAC context.
1286 	 */
1287 	if (vap) {
1288 		struct iwm_vap *iv = IWM_VAP(vap);
1289 		iv->phy_ctxt = NULL;
1290 		iv->is_uploaded = 0;
1291 	}
1292 
1293 	/* device going down, Stop using ICT table */
1294 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1295 
1296 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1297 
1298 	if (iwm_nic_lock(sc)) {
1299 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1300 
1301 		/* Stop each Tx DMA channel */
1302 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1303 			IWM_WRITE(sc,
1304 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1305 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1306 		}
1307 
1308 		/* Wait for DMA channels to be idle */
1309 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1310 		    5000)) {
1311 			device_printf(sc->sc_dev,
1312 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1313 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1314 		}
1315 		iwm_nic_unlock(sc);
1316 	}
1317 	iwm_pcie_rx_stop(sc);
1318 
1319 	/* Stop RX ring. */
1320 	iwm_reset_rx_ring(sc, &sc->rxq);
1321 
1322 	/* Reset all TX rings. */
1323 	for (qid = 0; qid < nitems(sc->txq); qid++)
1324 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1325 
1326 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1327 		/* Power-down device's busmaster DMA clocks */
1328 		if (iwm_nic_lock(sc)) {
1329 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1330 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1331 			iwm_nic_unlock(sc);
1332 		}
1333 		DELAY(5);
1334 	}
1335 
1336 	/* Make sure (redundant) we've released our request to stay awake */
1337 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1338 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1339 
1340 	/* Stop the device, and put it in low power state */
1341 	iwm_apm_stop(sc);
1342 
1343 	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1344 	 * Clean again the interrupt here
1345 	 */
1346 	iwm_disable_interrupts(sc);
1347 	/* stop and reset the on-board processor */
1348 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1349 
1350 	/*
1351 	 * Even if we stop the HW, we still want the RF kill
1352 	 * interrupt
1353 	 */
1354 	iwm_enable_rfkill_int(sc);
1355 	iwm_check_rfkill(sc);
1356 }
1357 
1358 /* iwlwifi: mvm/ops.c */
1359 static void
1360 iwm_mvm_nic_config(struct iwm_softc *sc)
1361 {
1362 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1363 	uint32_t reg_val = 0;
1364 	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1365 
1366 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1367 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1368 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1369 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1370 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1371 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1372 
1373 	/* SKU control */
1374 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1375 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1376 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1377 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1378 
1379 	/* radio configuration */
1380 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1381 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1382 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1383 
1384 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1385 
1386 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1387 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1388 	    radio_cfg_step, radio_cfg_dash);
1389 
1390 	/*
1391 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
	 * to lose ownership and become unable to obtain it back.
1394 	 */
1395 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1396 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1397 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1398 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1399 	}
1400 }
1401 
1402 static int
1403 iwm_nic_rx_init(struct iwm_softc *sc)
1404 {
1405 	/*
1406 	 * Initialize RX ring.  This is from the iwn driver.
1407 	 */
1408 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1409 
1410 	/* Stop Rx DMA */
1411 	iwm_pcie_rx_stop(sc);
1412 
1413 	if (!iwm_nic_lock(sc))
1414 		return EBUSY;
1415 
1416 	/* reset and flush pointers */
1417 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1418 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1419 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1420 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1421 
1422 	/* Set physical address of RX ring (256-byte aligned). */
1423 	IWM_WRITE(sc,
1424 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1425 
1426 	/* Set physical address of RX status (16-byte aligned). */
1427 	IWM_WRITE(sc,
1428 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1429 
1430 	/* Enable RX. */
1431 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1432 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1433 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1434 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1435 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
1436 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1437 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1438 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1439 
1440 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1441 
1442 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1443 	if (sc->cfg->host_interrupt_operation_mode)
1444 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1445 
1446 	/*
1447 	 * Thus sayeth el jefe (iwlwifi) via a comment:
1448 	 *
1449 	 * This value should initially be 0 (before preparing any
1450 	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1451 	 */
1452 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1453 
1454 	iwm_nic_unlock(sc);
1455 
1456 	return 0;
1457 }
1458 
1459 static int
1460 iwm_nic_tx_init(struct iwm_softc *sc)
1461 {
1462 	int qid;
1463 
1464 	if (!iwm_nic_lock(sc))
1465 		return EBUSY;
1466 
1467 	/* Deactivate TX scheduler. */
1468 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1469 
1470 	/* Set physical address of "keep warm" page (16-byte aligned). */
1471 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1472 
1473 	/* Initialize TX rings. */
1474 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1475 		struct iwm_tx_ring *txq = &sc->txq[qid];
1476 
1477 		/* Set physical address of TX ring (256-byte aligned). */
1478 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1479 		    txq->desc_dma.paddr >> 8);
1480 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1481 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1482 		    __func__,
1483 		    qid, txq->desc,
1484 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1485 	}
1486 
1487 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1488 
1489 	iwm_nic_unlock(sc);
1490 
1491 	return 0;
1492 }
1493 
1494 static int
1495 iwm_nic_init(struct iwm_softc *sc)
1496 {
1497 	int error;
1498 
1499 	iwm_apm_init(sc);
1500 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1501 		iwm_set_pwr(sc);
1502 
1503 	iwm_mvm_nic_config(sc);
1504 
1505 	if ((error = iwm_nic_rx_init(sc)) != 0)
1506 		return error;
1507 
1508 	/*
1509 	 * Ditto for TX, from iwn
1510 	 */
1511 	if ((error = iwm_nic_tx_init(sc)) != 0)
1512 		return error;
1513 
1514 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1515 	    "%s: shadow registers enabled\n", __func__);
1516 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1517 
1518 	return 0;
1519 }
1520 
1521 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1522 	IWM_MVM_TX_FIFO_VO,
1523 	IWM_MVM_TX_FIFO_VI,
1524 	IWM_MVM_TX_FIFO_BE,
1525 	IWM_MVM_TX_FIFO_BK,
1526 };
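
/*
 * Note: this table follows the iwlwifi/Linux access category numbering
 * (VO = 0, VI = 1, BE = 2, BK = 3), which differs from net80211's
 * WME_AC_* numbering (BE = 0 ... VO = 3).
 */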
1527 
1528 static int
1529 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1530 {
1531 	if (!iwm_nic_lock(sc)) {
1532 		device_printf(sc->sc_dev,
1533 		    "%s: cannot enable txq %d\n",
1534 		    __func__,
1535 		    qid);
1536 		return EBUSY;
1537 	}
1538 
1539 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1540 
1541 	if (qid == IWM_MVM_CMD_QUEUE) {
		/* deactivate before configuration */
1543 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1544 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1545 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1546 
1547 		iwm_nic_unlock(sc);
1548 
1549 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1550 
1551 		if (!iwm_nic_lock(sc)) {
1552 			device_printf(sc->sc_dev,
1553 			    "%s: cannot enable txq %d\n", __func__, qid);
1554 			return EBUSY;
1555 		}
1556 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1557 		iwm_nic_unlock(sc);
1558 
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1560 		/* Set scheduler window size and frame limit. */
1561 		iwm_write_mem32(sc,
1562 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1563 		    sizeof(uint32_t),
1564 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1565 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1566 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1567 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1568 
1569 		if (!iwm_nic_lock(sc)) {
1570 			device_printf(sc->sc_dev,
1571 			    "%s: cannot enable txq %d\n", __func__, qid);
1572 			return EBUSY;
1573 		}
1574 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1575 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1576 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1577 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1578 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1579 	} else {
1580 		struct iwm_scd_txq_cfg_cmd cmd;
1581 		int error;
1582 
1583 		iwm_nic_unlock(sc);
1584 
1585 		memset(&cmd, 0, sizeof(cmd));
1586 		cmd.scd_queue = qid;
1587 		cmd.enable = 1;
1588 		cmd.sta_id = sta_id;
1589 		cmd.tx_fifo = fifo;
1590 		cmd.aggregate = 0;
1591 		cmd.window = IWM_FRAME_LIMIT;
1592 
1593 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1594 		    sizeof(cmd), &cmd);
1595 		if (error) {
1596 			device_printf(sc->sc_dev,
1597 			    "cannot enable txq %d\n", qid);
1598 			return error;
1599 		}
1600 
1601 		if (!iwm_nic_lock(sc))
1602 			return EBUSY;
1603 	}
1604 
1605 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1606 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1607 
1608 	iwm_nic_unlock(sc);
1609 
1610 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1611 	    __func__, qid, fifo);
1612 
1613 	return 0;
1614 }
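
/*
 * Summary of the two paths above: the command queue is configured
 * directly by the driver through scheduler periphery registers and
 * SRAM context, while regular data queues are configured by the
 * firmware in response to an IWM_SCD_QUEUE_CFG host command.
 */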
1615 
1616 static int
1617 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1618 {
1619 	int error, chnl;
1620 
1621 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1622 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1623 
1624 	if (!iwm_nic_lock(sc))
1625 		return EBUSY;
1626 
1627 	iwm_ict_reset(sc);
1628 
1629 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1630 	if (scd_base_addr != 0 &&
1631 	    scd_base_addr != sc->scd_base_addr) {
1632 		device_printf(sc->sc_dev,
1633 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, scd_base_addr, sc->scd_base_addr);
1635 	}
1636 
1637 	iwm_nic_unlock(sc);
1638 
1639 	/* reset context data, TX status and translation data */
1640 	error = iwm_write_mem(sc,
1641 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1642 	    NULL, clear_dwords);
1643 	if (error)
1644 		return EBUSY;
1645 
1646 	if (!iwm_nic_lock(sc))
1647 		return EBUSY;
1648 
1649 	/* Set physical address of TX scheduler rings (1KB aligned). */
1650 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1651 
1652 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1653 
1654 	iwm_nic_unlock(sc);
1655 
1656 	/* enable command channel */
1657 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1658 	if (error)
1659 		return error;
1660 
1661 	if (!iwm_nic_lock(sc))
1662 		return EBUSY;
1663 
1664 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1665 
1666 	/* Enable DMA channels. */
1667 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1668 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1669 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1670 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1671 	}
1672 
1673 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1674 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1675 
1676 	iwm_nic_unlock(sc);
1677 
1678 	/* Enable L1-Active */
1679 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1680 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1681 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1682 	}
1683 
1684 	return error;
1685 }
1686 
1687 /*
1688  * NVM read access and content parsing.  We do not support
1689  * external NVM or writing NVM.
1690  * iwlwifi/mvm/nvm.c
1691  */
1692 
1693 /* Default NVM size to read */
1694 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1695 
1696 #define IWM_NVM_WRITE_OPCODE 1
1697 #define IWM_NVM_READ_OPCODE 0
1698 
1699 /* load nvm chunk response */
1700 enum {
1701 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1702 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1703 };
1704 
1705 static int
1706 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1707 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1708 {
1709 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1710 		.offset = htole16(offset),
1711 		.length = htole16(length),
1712 		.type = htole16(section),
1713 		.op_code = IWM_NVM_READ_OPCODE,
1714 	};
1715 	struct iwm_nvm_access_resp *nvm_resp;
1716 	struct iwm_rx_packet *pkt;
1717 	struct iwm_host_cmd cmd = {
1718 		.id = IWM_NVM_ACCESS_CMD,
1719 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1720 		.data = { &nvm_access_cmd, },
1721 	};
1722 	int ret, bytes_read, offset_read;
1723 	uint8_t *resp_data;
1724 
1725 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1726 
1727 	ret = iwm_send_cmd(sc, &cmd);
1728 	if (ret) {
1729 		device_printf(sc->sc_dev,
1730 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1731 		return ret;
1732 	}
1733 
1734 	pkt = cmd.resp_pkt;
1735 
1736 	/* Extract NVM response */
1737 	nvm_resp = (void *)pkt->data;
1738 	ret = le16toh(nvm_resp->status);
1739 	bytes_read = le16toh(nvm_resp->length);
1740 	offset_read = le16toh(nvm_resp->offset);
1741 	resp_data = nvm_resp->data;
1742 	if (ret) {
1743 		if ((offset != 0) &&
1744 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1745 			/*
1746 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1747 			 * read a chunk from an address that is a multiple of
1748 			 * 2K and got an error because that address is empty.
1749 			 * Meaning of (offset != 0): the driver already read
1750 			 * valid data from another chunk, so this case is not
1751 			 * an error.
1752 			 */
1753 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1754 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1755 				    offset);
1756 			*len = 0;
1757 			ret = 0;
1758 		} else {
1759 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1760 				    "NVM access command failed with status %d\n", ret);
1761 			ret = EIO;
1762 		}
1763 		goto exit;
1764 	}
1765 
1766 	if (offset_read != offset) {
1767 		device_printf(sc->sc_dev,
1768 		    "NVM ACCESS response with invalid offset %d\n",
1769 		    offset_read);
1770 		ret = EINVAL;
1771 		goto exit;
1772 	}
1773 
1774 	if (bytes_read > length) {
1775 		device_printf(sc->sc_dev,
1776 		    "NVM ACCESS response with too much data "
1777 		    "(%d bytes requested, %d bytes received)\n",
1778 		    length, bytes_read);
1779 		ret = EINVAL;
1780 		goto exit;
1781 	}
1782 
1783 	/* Copy the NVM response data into the output buffer. */
1784 	memcpy(data + offset, resp_data, bytes_read);
1785 	*len = bytes_read;
1786 
1787  exit:
1788 	iwm_free_resp(sc, &cmd);
1789 	return ret;
1790 }
1791 
1792 /*
1793  * Reads an NVM section completely.
1794  * NICs prior to the 7000 family don't have a real NVM, but just read
1795  * section 0, which is the EEPROM. Because EEPROM reads are not limited
1796  * by the uCode, we need to manually check in this case that we don't
1797  * overflow and try to read more than the EEPROM size.
1798  * For 7000 family NICs, we supply the maximal size we can read, and
1799  * the uCode fills the response with as much data as it can
1800  * without overflowing, so no check is needed.
1801  */
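/*
 * Worked example (illustrative): a 6 KB section is fetched as three 2 KB
 * chunks at offsets 0x0, 0x800 and 0x1000; the next read at offset 0x1800
 * then fails with NOT_VALID_ADDRESS, which iwm_nvm_read_chunk() turns into
 * seglen = 0, terminating the loop with *len = 0x1800.
 */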
1802 static int
1803 iwm_nvm_read_section(struct iwm_softc *sc,
1804 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1805 {
1806 	uint16_t seglen, length, offset = 0;
1807 	int ret;
1808 
1809 	/* Set nvm section read length */
1810 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1811 
1812 	seglen = length;
1813 
1814 	/* Read the NVM until exhausted (reading less than requested) */
1815 	while (seglen == length) {
1816 		/* Check no memory assumptions fail and cause an overflow */
1817 		if ((size_read + offset + length) >
1818 		    sc->cfg->eeprom_size) {
1819 			device_printf(sc->sc_dev,
1820 			    "EEPROM size is too small for NVM\n");
1821 			return ENOBUFS;
1822 		}
1823 
1824 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1825 		if (ret) {
1826 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1827 				    "Cannot read NVM from section %d offset %d, length %d\n",
1828 				    section, offset, length);
1829 			return ret;
1830 		}
1831 		offset += seglen;
1832 	}
1833 
1834 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1835 		    "NVM section %d read completed\n", section);
1836 	*len = offset;
1837 	return 0;
1838 }
1839 
1840 /*
1841  * BEGIN IWM_NVM_PARSE
1842  */
1843 
1844 /* iwlwifi/iwl-nvm-parse.c */
1845 
1846 /* NVM offsets (in words) definitions */
1847 enum iwm_nvm_offsets {
1848 	/* NVM HW-Section offset (in words) definitions */
1849 	IWM_HW_ADDR = 0x15,
1850 
1851 /* NVM SW-Section offset (in words) definitions */
1852 	IWM_NVM_SW_SECTION = 0x1C0,
1853 	IWM_NVM_VERSION = 0,
1854 	IWM_RADIO_CFG = 1,
1855 	IWM_SKU = 2,
1856 	IWM_N_HW_ADDRS = 3,
1857 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1858 
1859 /* NVM calibration section offset (in words) definitions */
1860 	IWM_NVM_CALIB_SECTION = 0x2B8,
1861 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1862 };
1863 
1864 enum iwm_8000_nvm_offsets {
1865 	/* NVM HW-Section offset (in words) definitions */
1866 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1867 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1868 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1869 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1870 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1871 
1872 	/* NVM SW-Section offset (in words) definitions */
1873 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1874 	IWM_NVM_VERSION_8000 = 0,
1875 	IWM_RADIO_CFG_8000 = 0,
1876 	IWM_SKU_8000 = 2,
1877 	IWM_N_HW_ADDRS_8000 = 3,
1878 
1879 	/* NVM REGULATORY -Section offset (in words) definitions */
1880 	IWM_NVM_CHANNELS_8000 = 0,
1881 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1882 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1883 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1884 
1885 	/* NVM calibration section offset (in words) definitions */
1886 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1887 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1888 };
1889 
1890 /* SKU Capabilities (actual values from NVM definition) */
1891 enum nvm_sku_bits {
1892 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1893 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1894 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1895 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1896 };
1897 
1898 /* radio config bits (actual values from NVM definition) */
1899 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1900 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1901 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1902 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1903 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1904 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1905 
1906 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1907 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1908 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1909 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1910 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1911 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
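
/*
 * Worked example (illustrative): a pre-8000 radio_cfg word of 0x3A56
 * decodes via the pre-8000 macros above to dash 2 (bits 0-1), step 1
 * (bits 2-3), type 1 (bits 4-5), pnum 1 (bits 6-7), tx_ant 0xA
 * (bits 8-11) and rx_ant 0x3 (bits 12-15).
 */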
1912 
1913 #define DEFAULT_MAX_TX_POWER 16
1914 
1915 /**
1916  * enum iwm_nvm_channel_flags - channel flags in NVM
1917  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1918  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1919  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1920  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1921  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1922  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1923  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1924  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1925  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1926  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1927  */
1928 enum iwm_nvm_channel_flags {
1929 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1930 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1931 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1932 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1933 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1934 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1935 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1936 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1937 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1938 };
1939 
1940 /*
1941  * Translate EEPROM flags to net80211.
1942  */
1943 static uint32_t
1944 iwm_eeprom_channel_flags(uint16_t ch_flags)
1945 {
1946 	uint32_t nflags;
1947 
1948 	nflags = 0;
1949 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1950 		nflags |= IEEE80211_CHAN_PASSIVE;
1951 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1952 		nflags |= IEEE80211_CHAN_NOADHOC;
1953 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1954 		nflags |= IEEE80211_CHAN_DFS;
1955 		/* Just in case. */
1956 		nflags |= IEEE80211_CHAN_NOADHOC;
1957 	}
1958 
1959 	return (nflags);
1960 }
1961 
1962 static void
1963 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1964     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1965     const uint8_t bands[])
1966 {
1967 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1968 	uint32_t nflags;
1969 	uint16_t ch_flags;
1970 	uint8_t ieee;
1971 	int error;
1972 
1973 	for (; ch_idx < ch_num; ch_idx++) {
1974 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1975 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1976 			ieee = iwm_nvm_channels[ch_idx];
1977 		else
1978 			ieee = iwm_nvm_channels_8000[ch_idx];
1979 
1980 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1981 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1982 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1983 			    ieee, ch_flags,
1984 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1985 			    "5.2" : "2.4");
1986 			continue;
1987 		}
1988 
1989 		nflags = iwm_eeprom_channel_flags(ch_flags);
1990 		error = ieee80211_add_channel(chans, maxchans, nchans,
1991 		    ieee, 0, 0, nflags, bands);
1992 		if (error != 0)
1993 			break;
1994 
1995 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1996 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1997 		    ieee, ch_flags,
1998 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1999 		    "5.2" : "2.4");
2000 	}
2001 }
2002 
2003 static void
2004 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2005     struct ieee80211_channel chans[])
2006 {
2007 	struct iwm_softc *sc = ic->ic_softc;
2008 	struct iwm_nvm_data *data = sc->nvm_data;
2009 	uint8_t bands[IEEE80211_MODE_BYTES];
2010 	size_t ch_num;
2011 
2012 	memset(bands, 0, sizeof(bands));
2013 	/* 1-13: 11b/g channels. */
2014 	setbit(bands, IEEE80211_MODE_11B);
2015 	setbit(bands, IEEE80211_MODE_11G);
2016 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2017 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2018 
2019 	/* 14: 11b channel only. */
2020 	clrbit(bands, IEEE80211_MODE_11G);
2021 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2022 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2023 
2024 	if (data->sku_cap_band_52GHz_enable) {
2025 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2026 			ch_num = nitems(iwm_nvm_channels);
2027 		else
2028 			ch_num = nitems(iwm_nvm_channels_8000);
2029 		memset(bands, 0, sizeof(bands));
2030 		setbit(bands, IEEE80211_MODE_11A);
2031 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2032 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2033 	}
2034 }
2035 
2036 static void
2037 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2038 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2039 {
2040 	const uint8_t *hw_addr;
2041 
2042 	if (mac_override) {
2043 		static const uint8_t reserved_mac[] = {
2044 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2045 		};
2046 
2047 		hw_addr = (const uint8_t *)(mac_override +
2048 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2049 
2050 		/*
2051 		 * Store the MAC address from the MAO section.
2052 		 * No byte swapping is required in the MAO section.
2053 		 */
2054 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2055 
2056 		/*
2057 		 * Force the use of the OTP MAC address in case of reserved MAC
2058 		 * address in the NVM, or if the address is given but invalid.
2059 		 */
2060 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2061 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2062 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2063 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2064 			return;
2065 
2066 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2067 		    "%s: mac address from nvm override section invalid\n",
2068 		    __func__);
2069 	}
2070 
2071 	if (nvm_hw) {
2072 		/* read the mac address from WFMP registers */
2073 		uint32_t mac_addr0 =
2074 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2075 		uint32_t mac_addr1 =
2076 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2077 
2078 		hw_addr = (const uint8_t *)&mac_addr0;
2079 		data->hw_addr[0] = hw_addr[3];
2080 		data->hw_addr[1] = hw_addr[2];
2081 		data->hw_addr[2] = hw_addr[1];
2082 		data->hw_addr[3] = hw_addr[0];
2083 
2084 		hw_addr = (const uint8_t *)&mac_addr1;
2085 		data->hw_addr[4] = hw_addr[1];
2086 		data->hw_addr[5] = hw_addr[0];
2087 
2088 		return;
2089 	}
2090 
2091 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2092 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2093 }
2094 
2095 static int
2096 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2097 	    const uint16_t *phy_sku)
2098 {
2099 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2100 		return le16_to_cpup(nvm_sw + IWM_SKU);
2101 
2102 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2103 }
2104 
2105 static int
2106 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2107 {
2108 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2109 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2110 	else
2111 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2112 						IWM_NVM_VERSION_8000));
2113 }
2114 
2115 static int
2116 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2117 		  const uint16_t *phy_sku)
2118 {
2119 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2120 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2121 
2122 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2123 }
2124 
2125 static int
2126 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2127 {
2128 	int n_hw_addr;
2129 
2130 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2131 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2132 
2133 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2134 
2135 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2136 }
2137 
2138 static void
2139 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2140 		  uint32_t radio_cfg)
2141 {
2142 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2143 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2144 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2145 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2146 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2147 		return;
2148 	}
2149 
2150 	/* set the radio configuration for family 8000 */
2151 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2152 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2153 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2154 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2155 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2156 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2157 }
2158 
2159 static int
2160 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2161 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2162 {
2163 #ifdef notyet /* for FAMILY 9000 */
2164 	if (cfg->mac_addr_from_csr) {
2165 		iwm_set_hw_address_from_csr(sc, data);
2166 	} else
2167 #endif
2168 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2169 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2170 
2171 		/* The byte order is little endian 16 bit, meaning 214365 */
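		/*
		 * E.g. (illustrative) NVM bytes AA BB CC DD EE FF yield the
		 * MAC address BB:AA:DD:CC:FF:EE after the swaps below.
		 */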
2172 		data->hw_addr[0] = hw_addr[1];
2173 		data->hw_addr[1] = hw_addr[0];
2174 		data->hw_addr[2] = hw_addr[3];
2175 		data->hw_addr[3] = hw_addr[2];
2176 		data->hw_addr[4] = hw_addr[5];
2177 		data->hw_addr[5] = hw_addr[4];
2178 	} else {
2179 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2180 	}
2181 
2182 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2183 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2184 		return EINVAL;
2185 	}
2186 
2187 	return 0;
2188 }
2189 
2190 static struct iwm_nvm_data *
2191 iwm_parse_nvm_data(struct iwm_softc *sc,
2192 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2193 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2194 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2195 {
2196 	struct iwm_nvm_data *data;
2197 	uint32_t sku, radio_cfg;
2198 
2199 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2200 		data = malloc(sizeof(*data) +
2201 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2202 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2203 	} else {
2204 		data = malloc(sizeof(*data) +
2205 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2206 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2207 	}
2208 	if (!data)
2209 		return NULL;
2210 
2211 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2212 
2213 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2214 	iwm_set_radio_cfg(sc, data, radio_cfg);
2215 
2216 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2217 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2218 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2219 	data->sku_cap_11n_enable = 0;
2220 
2221 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2222 
2223 	/* If no valid mac address was found - bail out */
2224 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2225 		free(data, M_DEVBUF);
2226 		return NULL;
2227 	}
2228 
2229 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2230 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2231 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2232 	} else {
2233 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2234 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2235 	}
2236 
2237 	return data;
2238 }
2239 
2240 static void
2241 iwm_free_nvm_data(struct iwm_nvm_data *data)
2242 {
2243 	if (data != NULL)
2244 		free(data, M_DEVBUF);
2245 }
2246 
2247 static struct iwm_nvm_data *
2248 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2249 {
2250 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2251 
2252 	/* Checking for required sections */
2253 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2254 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2255 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2256 			device_printf(sc->sc_dev,
2257 			    "Can't parse empty OTP/NVM sections\n");
2258 			return NULL;
2259 		}
2260 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2261 		/* SW and REGULATORY sections are mandatory */
2262 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2263 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2264 			device_printf(sc->sc_dev,
2265 			    "Can't parse empty OTP/NVM sections\n");
2266 			return NULL;
2267 		}
2268 		/* MAC_OVERRIDE or at least HW section must exist */
2269 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2270 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2271 			device_printf(sc->sc_dev,
2272 			    "Can't parse mac_address, empty sections\n");
2273 			return NULL;
2274 		}
2275 
2276 		/* PHY_SKU section is mandatory in B0 */
2277 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2278 			device_printf(sc->sc_dev,
2279 			    "Can't parse phy_sku in B0, empty sections\n");
2280 			return NULL;
2281 		}
2282 	} else {
2283 		panic("unknown device family %d\n", sc->cfg->device_family);
2284 	}
2285 
2286 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2287 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2288 	calib = (const uint16_t *)
2289 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2290 	regulatory = (const uint16_t *)
2291 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2292 	mac_override = (const uint16_t *)
2293 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2294 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2295 
2296 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2297 	    phy_sku, regulatory);
2298 }
2299 
2300 static int
2301 iwm_nvm_init(struct iwm_softc *sc)
2302 {
2303 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2304 	int i, ret, section;
2305 	uint32_t size_read = 0;
2306 	uint8_t *nvm_buffer, *temp;
2307 	uint16_t len;
2308 
2309 	memset(nvm_sections, 0, sizeof(nvm_sections));
2310 
2311 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2312 		return EINVAL;
2313 
2314 	/* Load NVM values from the NIC (read from the firmware NVM). */
2316 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2317 
2318 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2319 	if (!nvm_buffer)
2320 		return ENOMEM;
2321 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2322 		/* we override the constness for initial read */
2323 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2324 					   &len, size_read);
2325 		if (ret)
2326 			continue;
2327 		size_read += len;
2328 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2329 		if (!temp) {
2330 			ret = ENOMEM;
2331 			break;
2332 		}
2333 		memcpy(temp, nvm_buffer, len);
2334 
2335 		nvm_sections[section].data = temp;
2336 		nvm_sections[section].length = len;
2337 	}
2338 	if (!size_read)
2339 		device_printf(sc->sc_dev, "OTP is blank\n");
2340 	free(nvm_buffer, M_DEVBUF);
2341 
2342 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2343 	if (!sc->nvm_data)
2344 		return EINVAL;
2345 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2346 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2347 
2348 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2349 		if (nvm_sections[i].data != NULL)
2350 			free(nvm_sections[i].data, M_DEVBUF);
2351 	}
2352 
2353 	return 0;
2354 }
2355 
2356 static int
2357 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2358 	const struct iwm_fw_desc *section)
2359 {
2360 	struct iwm_dma_info *dma = &sc->fw_dma;
2361 	uint8_t *v_addr;
2362 	bus_addr_t p_addr;
2363 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2364 	int ret = 0;
2365 
2366 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2367 		    "%s: [%d] uCode section being loaded...\n",
2368 		    __func__, section_num);
2369 
2370 	v_addr = dma->vaddr;
2371 	p_addr = dma->paddr;
2372 
2373 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2374 		uint32_t copy_size, dst_addr;
2375 		int extended_addr = FALSE;
2376 
2377 		copy_size = MIN(chunk_sz, section->len - offset);
2378 		dst_addr = section->offset + offset;
2379 
2380 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2381 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2382 			extended_addr = TRUE;
2383 
2384 		if (extended_addr)
2385 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2386 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2387 
2388 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2389 		    copy_size);
2390 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2391 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2392 						   copy_size);
2393 
2394 		if (extended_addr)
2395 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2396 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2397 
2398 		if (ret) {
2399 			device_printf(sc->sc_dev,
2400 			    "%s: Could not load the [%d] uCode section\n",
2401 			    __func__, section_num);
2402 			break;
2403 		}
2404 	}
2405 
2406 	return ret;
2407 }
2408 
2409 /*
2410  * ucode
2411  */
2412 static int
2413 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2414 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2415 {
2416 	int ret;
2417 
2418 	sc->sc_fw_chunk_done = 0;
2419 
2420 	if (!iwm_nic_lock(sc))
2421 		return EBUSY;
2422 
2423 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2424 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2425 
2426 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2427 	    dst_addr);
2428 
2429 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2430 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2431 
2432 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2433 	    (iwm_get_dma_hi_addr(phy_addr)
2434 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2435 
2436 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2437 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2438 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2439 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2440 
2441 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2442 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2443 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2444 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2445 
2446 	iwm_nic_unlock(sc);
2447 
2448 	/* Wait for this segment to load; msleep() times out after one second. */
2449 	ret = 0;
2450 	while (!sc->sc_fw_chunk_done) {
2451 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2452 		if (ret)
2453 			break;
2454 	}
2455 
2456 	if (ret != 0) {
2457 		device_printf(sc->sc_dev,
2458 		    "fw chunk addr 0x%x len %d failed to load\n",
2459 		    dst_addr, byte_cnt);
2460 		return ETIMEDOUT;
2461 	}
2462 
2463 	return 0;
2464 }
2465 
2466 static int
2467 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2468 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2469 {
2470 	int shift_param;
2471 	int i, ret = 0, sec_num = 0x1;
2472 	uint32_t val, last_read_idx = 0;
2473 
2474 	if (cpu == 1) {
2475 		shift_param = 0;
2476 		*first_ucode_section = 0;
2477 	} else {
2478 		shift_param = 16;
2479 		(*first_ucode_section)++;
2480 	}
2481 
2482 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2483 		last_read_idx = i;
2484 
2485 		/*
2486 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2487 		 * CPU1 sections from the CPU2 sections.
2488 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2489 		 * non-paged sections from the CPU2 paging sections.
2490 		 */
2491 		if (!image->fw_sect[i].data ||
2492 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2493 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2494 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2495 				    "Break since data is not valid or section is empty, sec = %d\n",
2496 				    i);
2497 			break;
2498 		}
2499 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2500 		if (ret)
2501 			return ret;
2502 
2503 		/* Notify the ucode of the loaded section number and status */
2504 		if (iwm_nic_lock(sc)) {
2505 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2506 			val = val | (sec_num << shift_param);
2507 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2508 			sec_num = (sec_num << 1) | 0x1;
2509 			iwm_nic_unlock(sc);
2510 		}
2511 	}
2512 
2513 	*first_ucode_section = last_read_idx;
2514 
2515 	iwm_enable_interrupts(sc);
2516 
2517 	if (iwm_nic_lock(sc)) {
2518 		if (cpu == 1)
2519 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2520 		else
2521 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2522 		iwm_nic_unlock(sc);
2523 	}
2524 
2525 	return 0;
2526 }
2527 
2528 static int
2529 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2530 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2531 {
2532 	int shift_param;
2533 	int i, ret = 0;
2534 	uint32_t last_read_idx = 0;
2535 
2536 	if (cpu == 1) {
2537 		shift_param = 0;
2538 		*first_ucode_section = 0;
2539 	} else {
2540 		shift_param = 16;
2541 		(*first_ucode_section)++;
2542 	}
2543 
2544 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2545 		last_read_idx = i;
2546 
2547 		/*
2548 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2549 		 * CPU1 sections from the CPU2 sections.
2550 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2551 		 * non-paged sections from the CPU2 paging sections.
2552 		 */
2553 		if (!image->fw_sect[i].data ||
2554 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2555 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2556 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2557 				    "Break since data is not valid or section is empty, sec = %d\n",
2558 				    i);
2559 			break;
2560 		}
2561 
2562 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2563 		if (ret)
2564 			return ret;
2565 	}
2566 
2567 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2568 		iwm_set_bits_prph(sc,
2569 				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2570 				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2571 				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2572 				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2573 					shift_param);
2574 
2575 	*first_ucode_section = last_read_idx;
2576 
2577 	return 0;
2579 }
2580 
2581 static int
2582 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2583 	const struct iwm_fw_sects *image)
2584 {
2585 	int ret = 0;
2586 	int first_ucode_section;
2587 
2588 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2589 		     image->is_dual_cpus ? "Dual" : "Single");
2590 
2591 	/* load to FW the binary non secured sections of CPU1 */
2592 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2593 	if (ret)
2594 		return ret;
2595 
2596 	if (image->is_dual_cpus) {
2597 		/* set CPU2 header address */
2598 		if (iwm_nic_lock(sc)) {
2599 			iwm_write_prph(sc,
2600 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2601 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2602 			iwm_nic_unlock(sc);
2603 		}
2604 
2605 		/* load to FW the binary sections of CPU2 */
2606 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2607 						 &first_ucode_section);
2608 		if (ret)
2609 			return ret;
2610 	}
2611 
2612 	iwm_enable_interrupts(sc);
2613 
2614 	/* release CPU reset */
2615 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2616 
2617 	return 0;
2618 }
2619 
2620 int
2621 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2622 	const struct iwm_fw_sects *image)
2623 {
2624 	int ret = 0;
2625 	int first_ucode_section;
2626 
2627 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2628 		    image->is_dual_cpus ? "Dual" : "Single");
2629 
2630 	/* configure the ucode to be ready to get the secured image */
2631 	/* release CPU reset */
2632 	if (iwm_nic_lock(sc)) {
2633 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2634 		    IWM_RELEASE_CPU_RESET_BIT);
2635 		iwm_nic_unlock(sc);
2636 	}
2637 
2638 	/* load to FW the binary Secured sections of CPU1 */
2639 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2640 	    &first_ucode_section);
2641 	if (ret)
2642 		return ret;
2643 
2644 	/* load to FW the binary sections of CPU2 */
2645 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2646 	    &first_ucode_section);
2647 }
2648 
2649 /* XXX Get rid of this definition */
2650 static inline void
2651 iwm_enable_fw_load_int(struct iwm_softc *sc)
2652 {
2653 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2654 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2655 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2656 }
2657 
2658 /* XXX Add proper rfkill support code */
2659 static int
2660 iwm_start_fw(struct iwm_softc *sc,
2661 	const struct iwm_fw_sects *fw)
2662 {
2663 	int ret;
2664 
2665 	/* This may fail if AMT took ownership of the device */
2666 	if (iwm_prepare_card_hw(sc)) {
2667 		device_printf(sc->sc_dev,
2668 		    "%s: Exit HW not ready\n", __func__);
2669 		ret = EIO;
2670 		goto out;
2671 	}
2672 
2673 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2674 
2675 	iwm_disable_interrupts(sc);
2676 
2677 	/* make sure rfkill handshake bits are cleared */
2678 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2679 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2680 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2681 
2682 	/* clear (again), then enable host interrupts */
2683 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2684 
2685 	ret = iwm_nic_init(sc);
2686 	if (ret) {
2687 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2688 		goto out;
2689 	}
2690 
2691 	/*
2692 	 * Now, we load the firmware and don't want to be interrupted, even
2693 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2694 	 * FH_TX interrupt, which is needed to load the firmware). If the
2695 	 * RF-Kill switch is toggled, we will find out after having loaded
2696 	 * the firmware and return the proper value to the caller.
2697 	 */
2698 	iwm_enable_fw_load_int(sc);
2699 
2700 	/* really make sure rfkill handshake bits are cleared */
2701 	/* maybe we should write a few times more?  just to make sure */
2702 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2703 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2704 
2705 	/* Load the given image to the HW */
2706 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2707 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2708 	else
2709 		ret = iwm_pcie_load_given_ucode(sc, fw);
2710 
2711 	/* XXX re-check RF-Kill state */
2712 
2713 out:
2714 	return ret;
2715 }
2716 
2717 static int
2718 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2719 {
2720 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2721 		.valid = htole32(valid_tx_ant),
2722 	};
2723 
2724 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2725 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2726 }
2727 
2728 /* iwlwifi: mvm/fw.c */
2729 static int
2730 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2731 {
2732 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2733 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2734 
2735 	/* Set parameters */
2736 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2737 	phy_cfg_cmd.calib_control.event_trigger =
2738 	    sc->sc_default_calib[ucode_type].event_trigger;
2739 	phy_cfg_cmd.calib_control.flow_trigger =
2740 	    sc->sc_default_calib[ucode_type].flow_trigger;
2741 
2742 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2743 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2744 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2745 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2746 }
2747 
2748 static int
2749 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2750 {
2751 	struct iwm_mvm_alive_data *alive_data = data;
2752 	struct iwm_mvm_alive_resp_ver1 *palive1;
2753 	struct iwm_mvm_alive_resp_ver2 *palive2;
2754 	struct iwm_mvm_alive_resp *palive;
2755 
2756 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2757 		palive1 = (void *)pkt->data;
2758 
2759 		sc->support_umac_log = FALSE;
2760 		sc->error_event_table =
2761 			le32toh(palive1->error_event_table_ptr);
2762 		sc->log_event_table =
2763 			le32toh(palive1->log_event_table_ptr);
2764 		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2765 
2766 		alive_data->valid = le16toh(palive1->status) ==
2767 				    IWM_ALIVE_STATUS_OK;
2768 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2769 			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2770 			    le16toh(palive1->status), palive1->ver_type,
2771 			    palive1->ver_subtype, palive1->flags);
2772 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2773 		palive2 = (void *)pkt->data;
2774 		sc->error_event_table =
2775 			le32toh(palive2->error_event_table_ptr);
2776 		sc->log_event_table =
2777 			le32toh(palive2->log_event_table_ptr);
2778 		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2779 		sc->umac_error_event_table =
2780 			le32toh(palive2->error_info_addr);
2781 
2782 		alive_data->valid = le16toh(palive2->status) ==
2783 				    IWM_ALIVE_STATUS_OK;
2784 		if (sc->umac_error_event_table)
2785 			sc->support_umac_log = TRUE;
2786 
2787 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2788 			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2789 			    le16toh(palive2->status), palive2->ver_type,
2790 			    palive2->ver_subtype, palive2->flags);
2791 
2792 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2793 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2794 			    palive2->umac_major, palive2->umac_minor);
2795 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2796 		palive = (void *)pkt->data;
2797 
2798 		sc->error_event_table =
2799 			le32toh(palive->error_event_table_ptr);
2800 		sc->log_event_table =
2801 			le32toh(palive->log_event_table_ptr);
2802 		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2803 		sc->umac_error_event_table =
2804 			le32toh(palive->error_info_addr);
2805 
2806 		alive_data->valid = le16toh(palive->status) ==
2807 				    IWM_ALIVE_STATUS_OK;
2808 		if (sc->umac_error_event_table)
2809 			sc->support_umac_log = TRUE;
2810 
2811 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2812 			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2813 			    le16toh(palive->status), palive->ver_type,
2814 			    palive->ver_subtype, palive->flags);
2815 
2816 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2817 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2818 			    le32toh(palive->umac_major),
2819 			    le32toh(palive->umac_minor));
2820 	}
2821 
2822 	return TRUE;
2823 }
2824 
2825 static int
2826 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2827 	struct iwm_rx_packet *pkt, void *data)
2828 {
2829 	struct iwm_phy_db *phy_db = data;
2830 
2831 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2832 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2833 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2834 			    __func__, pkt->hdr.code);
2835 		}
2836 		return TRUE;
2837 	}
2838 
2839 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2840 		device_printf(sc->sc_dev,
2841 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2842 	}
2843 
2844 	return FALSE;
2845 }
2846 
2847 static int
2848 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2849 	enum iwm_ucode_type ucode_type)
2850 {
2851 	struct iwm_notification_wait alive_wait;
2852 	struct iwm_mvm_alive_data alive_data;
2853 	const struct iwm_fw_sects *fw;
2854 	enum iwm_ucode_type old_type = sc->cur_ucode;
2855 	int error;
2856 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2857 
2858 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2859 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2860 			error);
2861 		return error;
2862 	}
2863 	fw = &sc->sc_fw.fw_sects[ucode_type];
2864 	sc->cur_ucode = ucode_type;
2865 	sc->ucode_loaded = FALSE;
2866 
2867 	memset(&alive_data, 0, sizeof(alive_data));
2868 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2869 				   alive_cmd, nitems(alive_cmd),
2870 				   iwm_alive_fn, &alive_data);
2871 
2872 	error = iwm_start_fw(sc, fw);
2873 	if (error) {
2874 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2875 		sc->cur_ucode = old_type;
2876 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2877 		return error;
2878 	}
2879 
2880 	/*
2881 	 * Some things may run in the background now, but we
2882 	 * just wait for the ALIVE notification here.
2883 	 */
2884 	IWM_UNLOCK(sc);
2885 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2886 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2887 	IWM_LOCK(sc);
2888 	if (error) {
2889 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2890 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2891 			if (iwm_nic_lock(sc)) {
2892 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2893 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2894 				iwm_nic_unlock(sc);
2895 			}
2896 			device_printf(sc->sc_dev,
2897 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2898 			    a, b);
2899 		}
2900 		sc->cur_ucode = old_type;
2901 		return error;
2902 	}
2903 
2904 	if (!alive_data.valid) {
2905 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2906 		    __func__);
2907 		sc->cur_ucode = old_type;
2908 		return EIO;
2909 	}
2910 
2911 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2912 
2913 	/*
2914 	 * Configure and operate the firmware paging mechanism.
2915 	 * The driver configures the paging flow only once; the CPU2 paging
2916 	 * image is included in the IWM_UCODE_INIT image.
2917 	 */
2918 	if (fw->paging_mem_size) {
2919 		error = iwm_save_fw_paging(sc, fw);
2920 		if (error) {
2921 			device_printf(sc->sc_dev,
2922 			    "%s: failed to save the FW paging image\n",
2923 			    __func__);
2924 			return error;
2925 		}
2926 
2927 		error = iwm_send_paging_cmd(sc, fw);
2928 		if (error) {
2929 			device_printf(sc->sc_dev,
2930 			    "%s: failed to send the paging cmd\n", __func__);
2931 			iwm_free_fw_paging(sc);
2932 			return error;
2933 		}
2934 	}
2935 
2936 	if (!error)
2937 		sc->ucode_loaded = TRUE;
2938 	return error;
2939 }
2940 
2941 /*
2942  * mvm misc bits
2943  */
2944 
2945 /*
2946  * follows iwlwifi/fw.c
2947  */
2948 static int
2949 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2950 {
2951 	struct iwm_notification_wait calib_wait;
2952 	static const uint16_t init_complete[] = {
2953 		IWM_INIT_COMPLETE_NOTIF,
2954 		IWM_CALIB_RES_NOTIF_PHY_DB
2955 	};
2956 	int ret;
2957 
2958 	/* do not operate with rfkill switch turned on */
2959 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2960 		device_printf(sc->sc_dev,
2961 		    "radio is disabled by hardware switch\n");
2962 		return EPERM;
2963 	}
2964 
2965 	iwm_init_notification_wait(sc->sc_notif_wait,
2966 				   &calib_wait,
2967 				   init_complete,
2968 				   nitems(init_complete),
2969 				   iwm_wait_phy_db_entry,
2970 				   sc->sc_phy_db);
2971 
2972 	/* Will also start the device */
2973 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2974 	if (ret) {
2975 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2976 		    ret);
2977 		goto error;
2978 	}
2979 
2980 	if (justnvm) {
2981 		/* Read nvm */
2982 		ret = iwm_nvm_init(sc);
2983 		if (ret) {
2984 			device_printf(sc->sc_dev, "failed to read nvm\n");
2985 			goto error;
2986 		}
2987 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2988 		goto error;
2989 	}
2990 
2991 	ret = iwm_send_bt_init_conf(sc);
2992 	if (ret) {
2993 		device_printf(sc->sc_dev,
2994 		    "failed to send bt coex configuration: %d\n", ret);
2995 		goto error;
2996 	}
2997 
2998 	/* Init Smart FIFO. */
2999 	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3000 	if (ret)
3001 		goto error;
3002 
3003 	/* Send TX valid antennas before triggering calibrations */
3004 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3005 	if (ret) {
3006 		device_printf(sc->sc_dev,
3007 		    "failed to send antennas before calibration: %d\n", ret);
3008 		goto error;
3009 	}
3010 
3011 	/*
3012 	 * Send phy configurations command to init uCode
3013 	 * to start the 16.0 uCode init image internal calibrations.
3014 	 */
3015 	ret = iwm_send_phy_cfg_cmd(sc);
3016 	if (ret) {
3017 		device_printf(sc->sc_dev,
3018 		    "%s: Failed to run INIT calibrations: %d\n",
3019 		    __func__, ret);
3020 		goto error;
3021 	}
3022 
3023 	/*
3024 	 * Nothing to do but wait for the init complete notification
3025 	 * from the firmware.
3026 	 */
3027 	IWM_UNLOCK(sc);
3028 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3029 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3030 	IWM_LOCK(sc);
3031 
3033 	goto out;
3034 
3035 error:
3036 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3037 out:
3038 	return ret;
3039 }
3040 
3041 /*
3042  * receive side
3043  */
3044 
3045 /* (re)stock rx ring, called at init-time and at runtime */
3046 static int
3047 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3048 {
3049 	struct iwm_rx_ring *ring = &sc->rxq;
3050 	struct iwm_rx_data *data = &ring->data[idx];
3051 	struct mbuf *m;
3052 	bus_dmamap_t dmamap;
3053 	bus_dma_segment_t seg;
3054 	int nsegs, error;
3055 
3056 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3057 	if (m == NULL)
3058 		return ENOBUFS;
3059 
3060 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3061 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3062 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3063 	if (error != 0) {
3064 		device_printf(sc->sc_dev,
3065 		    "%s: can't map mbuf, error %d\n", __func__, error);
3066 		m_freem(m);
3067 		return error;
3068 	}
3069 
3070 	if (data->m != NULL)
3071 		bus_dmamap_unload(ring->data_dmat, data->map);
3072 
3073 	/* Swap ring->spare_map with data->map */
3074 	dmamap = data->map;
3075 	data->map = ring->spare_map;
3076 	ring->spare_map = dmamap;
3077 
3078 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3079 	data->m = m;
3080 
3081 	/* Update RX descriptor. */
3082 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
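	/*
	 * The RX descriptor holds the buffer's DMA address shifted right by
	 * 8 bits, so the buffer must be 256-byte aligned (asserted above).
	 */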
3083 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3084 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3085 	    BUS_DMASYNC_PREWRITE);
3086 
3087 	return 0;
3088 }
3089 
3090 /* iwlwifi: mvm/rx.c */
3091 /*
3092  * iwm_mvm_get_signal_strength - use the new rx PHY INFO API.
3093  * Values are reported by the fw as positive and need to be negated
3094  * to obtain their dBm value.  Account for missing antennas by replacing 0
3095  * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
3096  */
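/*
 * Worked example (illustrative): if the firmware reports antenna A at 40,
 * antenna B missing (0) and antenna C at 48, the per-antenna values become
 * -40, -256 and -48 dBm, and max_energy picks the strongest, -40 dBm.
 */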
3097 static int
3098 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3099 {
3100 	int energy_a, energy_b, energy_c, max_energy;
3101 	uint32_t val;
3102 
3103 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3104 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3105 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3106 	energy_a = energy_a ? -energy_a : -256;
3107 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3108 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3109 	energy_b = energy_b ? -energy_b : -256;
3110 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3111 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3112 	energy_c = energy_c ? -energy_c : -256;
3113 	max_energy = MAX(energy_a, energy_b);
3114 	max_energy = MAX(max_energy, energy_c);
3115 
3116 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3117 	    "energy In A %d B %d C %d , and max %d\n",
3118 	    energy_a, energy_b, energy_c, max_energy);
3119 
3120 	return max_energy;
3121 }
3122 
3123 static void
3124 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3125 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3126 {
3127 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3128 
3129 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3130 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3131 
3132 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3133 }
3134 
3135 /*
3136  * Retrieve the average noise (in dBm) among receivers.
3137  */
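/*
 * Illustrative note: with beacon_silence_rssi values of 30, 0 and 28, only
 * the two non-zero antennas are counted (total 58, nbant 2); the disabled
 * formula below would yield (58 / 2) - 107 = -78 dBm, but the driver
 * currently returns a hard-coded -96.
 */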
3138 static int
3139 iwm_get_noise(struct iwm_softc *sc,
3140     const struct iwm_mvm_statistics_rx_non_phy *stats)
3141 {
3142 	int i, total, nbant, noise;
3143 
3144 	total = nbant = noise = 0;
3145 	for (i = 0; i < 3; i++) {
3146 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3147 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3148 		    __func__,
3149 		    i,
3150 		    noise);
3151 
3152 		if (noise) {
3153 			total += noise;
3154 			nbant++;
3155 		}
3156 	}
3157 
3158 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3159 	    __func__, nbant, total);
3160 #if 0
3161 	/* There should be at least one antenna but check anyway. */
3162 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3163 #else
3164 	/* For now, just hard-code it to -96 to be safe */
3165 	return (-96);
3166 #endif
3167 }
3168 
3169 /*
3170  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3171  *
3172  * Handles the actual data of the Rx packet from the fw
3173  */
3174 static void
3175 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m)
3176 {
3177 	struct ieee80211com *ic = &sc->sc_ic;
3178 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3179 	struct ieee80211_frame *wh;
3180 	struct ieee80211_node *ni;
3181 	struct ieee80211_rx_stats rxs;
3182 	struct iwm_rx_phy_info *phy_info;
3183 	struct iwm_rx_mpdu_res_start *rx_res;
3184 	struct iwm_rx_packet *pkt = mtod(m, struct iwm_rx_packet *);
3185 	uint32_t len;
3186 	uint32_t rx_pkt_status;
3187 	int rssi;
3188 
3189 	phy_info = &sc->sc_last_phy_info;
3190 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3191 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3192 	len = le16toh(rx_res->byte_count);
3193 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3194 
3195 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3196 		device_printf(sc->sc_dev,
3197 		    "dsp size out of range [0,20]: %d\n",
3198 		    phy_info->cfg_phy_cnt);
3199 		goto fail;
3200 	}
3201 
3202 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3203 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3204 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3205 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3206 		goto fail;
3207 	}
3208 
3209 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3210 
3211 	/* Map it to relative value */
3212 	rssi = rssi - sc->sc_noise;
3213 
3214 	/* replenish ring for the buffer we're going to feed to the sharks */
3215 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3216 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3217 		    __func__);
3218 		goto fail;
3219 	}
3220 
3221 	m->m_data = pkt->data + sizeof(*rx_res);
3222 	m->m_pkthdr.len = m->m_len = len;
3223 
3224 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3225 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3226 
3227 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3228 
3229 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3230 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3231 	    __func__,
3232 	    le16toh(phy_info->channel),
3233 	    le16toh(phy_info->phy_flags));
3234 
3235 	/*
3236 	 * Populate an RX state struct with the provided information.
3237 	 */
3238 	bzero(&rxs, sizeof(rxs));
3239 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3240 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3241 	rxs.c_ieee = le16toh(phy_info->channel);
3242 	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3243 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3244 	} else {
3245 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3246 	}
3247 
3248 	/* rssi is in 1/2db units */
3249 	rxs.c_rssi = rssi * 2;
3250 	rxs.c_nf = sc->sc_noise;
3251 	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3252 		if (ni)
3253 			ieee80211_free_node(ni);
3254 		goto fail;
3255 	}
3256 
3257 	if (ieee80211_radiotap_active_vap(vap)) {
3258 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3259 
3260 		tap->wr_flags = 0;
3261 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3262 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3263 		tap->wr_chan_freq = htole16(rxs.c_freq);
3264 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3265 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3266 		tap->wr_dbm_antsignal = (int8_t)rssi;
3267 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3268 		tap->wr_tsft = phy_info->system_timestamp;
3269 		switch (phy_info->rate) {
3270 		/* CCK rates. */
3271 		case  10: tap->wr_rate =   2; break;
3272 		case  20: tap->wr_rate =   4; break;
3273 		case  55: tap->wr_rate =  11; break;
3274 		case 110: tap->wr_rate =  22; break;
3275 		/* OFDM rates. */
3276 		case 0xd: tap->wr_rate =  12; break;
3277 		case 0xf: tap->wr_rate =  18; break;
3278 		case 0x5: tap->wr_rate =  24; break;
3279 		case 0x7: tap->wr_rate =  36; break;
3280 		case 0x9: tap->wr_rate =  48; break;
3281 		case 0xb: tap->wr_rate =  72; break;
3282 		case 0x1: tap->wr_rate =  96; break;
3283 		case 0x3: tap->wr_rate = 108; break;
3284 		/* Unknown rate: should not happen. */
3285 		default:  tap->wr_rate =   0;
3286 		}
3287 	}
3288 
3289 	IWM_UNLOCK(sc);
3290 	if (ni != NULL) {
3291 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3292 		ieee80211_input_mimo(ni, m);
3293 		ieee80211_free_node(ni);
3294 	} else {
3295 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3296 		ieee80211_input_mimo_all(ic, m);
3297 	}
3298 	IWM_LOCK(sc);
3299 
3300 	return;
3301 
3302 fail:
3303 	counter_u64_add(ic->ic_ierrors, 1);
3304 }
3305 
3306 static int
3307 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3308 	struct iwm_node *in)
3309 {
3310 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3311 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3312 	struct ieee80211_node *ni = &in->in_ni;
3313 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3314 
3315 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3316 
3317 	/* Update rate control statistics. */
3318 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3319 	    __func__,
3320 	    (int) le16toh(tx_resp->status.status),
3321 	    (int) le16toh(tx_resp->status.sequence),
3322 	    tx_resp->frame_count,
3323 	    tx_resp->bt_kill_count,
3324 	    tx_resp->failure_rts,
3325 	    tx_resp->failure_frame,
3326 	    le32toh(tx_resp->initial_rate),
3327 	    (int) le16toh(tx_resp->wireless_media_time));
3328 
3329 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3330 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3331 	txs->short_retries = tx_resp->failure_rts;
3332 	txs->long_retries = tx_resp->failure_frame;
3333 	if (status != IWM_TX_STATUS_SUCCESS &&
3334 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3335 		switch (status) {
3336 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3337 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3338 			break;
3339 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3340 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3341 			break;
3342 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3343 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3344 			break;
3345 		default:
3346 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3347 			break;
3348 		}
3349 	} else {
3350 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3351 	}
3352 	ieee80211_ratectl_tx_complete(ni, txs);
3353 
3354 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3355 }
3356 
3357 static void
3358 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3359 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3360 {
3361 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3362 	int idx = cmd_hdr->idx;
3363 	int qid = cmd_hdr->qid;
3364 	struct iwm_tx_ring *ring = &sc->txq[qid];
3365 	struct iwm_tx_data *txd = &ring->data[idx];
3366 	struct iwm_node *in = txd->in;
3367 	struct mbuf *m = txd->m;
3368 	int status;
3369 
3370 	KASSERT(txd->done == 0, ("txd not done"));
3371 	KASSERT(txd->in != NULL, ("txd without node"));
3372 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3373 
3374 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3375 
3376 	sc->sc_tx_timer = 0;
3377 
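	/*
	 * iwm_mvm_rx_tx_cmd_single() returns nonzero on TX failure;
	 * ieee80211_tx_complete() below treats any nonzero status as
	 * an error.
	 */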
3378 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3379 
3380 	/* Unmap and free mbuf. */
3381 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3382 	bus_dmamap_unload(ring->data_dmat, txd->map);
3383 
3384 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3385 	    "free txd %p, in %p\n", txd, txd->in);
3386 	txd->done = 1;
3387 	txd->m = NULL;
3388 	txd->in = NULL;
3389 
3390 	ieee80211_tx_complete(&in->in_ni, m, status);
3391 
3392 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3393 		sc->qfullmsk &= ~(1 << ring->qid);
3394 		if (sc->qfullmsk == 0) {
3395 			iwm_start(sc);
3396 		}
3397 	}
3398 }
3399 
3400 /*
3401  * transmit side
3402  */
3403 
3404 /*
3405  * Process a "command done" firmware notification.  This is where we wake up
3406  * processes waiting for a synchronous command completion.
3407  * (Adapted from if_iwn.)
3408  */
3409 static void
3410 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3411 {
3412 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3413 	struct iwm_tx_data *data;
3414 
3415 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3416 		return;	/* Not a command ack. */
3417 	}
3418 
3419 	/* XXX wide commands? */
3420 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3421 	    "cmd notification type 0x%x qid %d idx %d\n",
3422 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3423 
3424 	data = &ring->data[pkt->hdr.idx];
3425 
3426 	/* If the command was mapped in an mbuf, free it. */
3427 	if (data->m != NULL) {
3428 		bus_dmamap_sync(ring->data_dmat, data->map,
3429 		    BUS_DMASYNC_POSTWRITE);
3430 		bus_dmamap_unload(ring->data_dmat, data->map);
3431 		m_freem(data->m);
3432 		data->m = NULL;
3433 	}
3434 	wakeup(&ring->desc[pkt->hdr.idx]);
3435 
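	/*
	 * Command completions should arrive in order, so the entry being
	 * completed here ought to be the oldest outstanding slot, i.e.
	 * (cur - queued) modulo the ring size.
	 */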
3436 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3437 		device_printf(sc->sc_dev,
3438 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3439 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3440 		/* XXX call iwm_force_nmi() */
3441 	}
3442 
3443 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3444 	ring->queued--;
3445 	if (ring->queued == 0)
3446 		iwm_pcie_clear_cmd_in_flight(sc);
3447 }
3448 
3449 #if 0
3450 /*
3451  * necessary only for block ack mode
3452  */
3453 void
3454 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3455 	uint16_t len)
3456 {
3457 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3458 	uint16_t w_val;
3459 
3460 	scd_bc_tbl = sc->sched_dma.vaddr;
3461 
3462 	len += 8; /* magic numbers came naturally from paris */
3463 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3464 		len = roundup(len, 4) / 4;
3465 
3466 	w_val = htole16(sta_id << 12 | len);
3467 
3468 	/* Update TX scheduler. */
3469 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3470 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3471 	    BUS_DMASYNC_PREWRITE);
3472 
3473 	/* I really wonder what this is ?!? */
3474 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3475 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3476 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3477 		    BUS_DMASYNC_PREWRITE);
3478 	}
3479 }
3480 #endif
3481 
3482 /*
3483  * Take an 802.11 (non-n) rate, find the relevant rate
3484  * table entry.  return the index into in_ridx[].
3485  *
3486  * The caller then uses that index back into in_ridx
3487  * to figure out the rate index programmed /into/
3488  * the firmware for this given node.
3489  */
3490 static int
3491 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3492     uint8_t rate)
3493 {
3494 	int i;
3495 	uint8_t r;
3496 
3497 	for (i = 0; i < nitems(in->in_ridx); i++) {
3498 		r = iwm_rates[in->in_ridx[i]].rate;
3499 		if (rate == r)
3500 			return (i);
3501 	}
3502 
3503 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3504 	    "%s: couldn't find an entry for rate=%d\n",
3505 	    __func__,
3506 	    rate);
3507 
3508 	/* XXX Return the first */
3509 	/* XXX TODO: have it return the /lowest/ */
3510 	return (0);
3511 }
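
/*
 * A minimal usage sketch (hypothetical caller, mirroring what
 * iwm_tx_fill_cmd() does below):
 *
 *	i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
 *	ridx = in->in_ridx[i];		(index into iwm_rates[])
 *	tx->initial_rate_index = i;	(index into the programmed LQ table)
 */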
3512 
3513 static int
3514 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3515 {
3516 	int i;
3517 
3518 	for (i = 0; i < nitems(iwm_rates); i++) {
3519 		if (iwm_rates[i].rate == rate)
3520 			return (i);
3521 	}
3522 	/* XXX error? */
3523 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3524 	    "%s: couldn't find an entry for rate=%d\n",
3525 	    __func__,
3526 	    rate);
3527 	return (0);
3528 }
3529 
3530 /*
3531  * Fill in the rate related information for a transmit command.
3532  */
3533 static const struct iwm_rate *
3534 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3535 	struct mbuf *m, struct iwm_tx_cmd *tx)
3536 {
3537 	struct ieee80211_node *ni = &in->in_ni;
3538 	struct ieee80211_frame *wh;
3539 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3540 	const struct iwm_rate *rinfo;
3541 	int type;
3542 	int ridx, rate_flags;
3543 
3544 	wh = mtod(m, struct ieee80211_frame *);
3545 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3546 
3547 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3548 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3549 
3550 	if (type == IEEE80211_FC0_TYPE_MGT ||
3551 	    type == IEEE80211_FC0_TYPE_CTL ||
3552 	    (m->m_flags & M_EAPOL) != 0) {
3553 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3554 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3555 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3556 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3557 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3558 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3559 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3560 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3561 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3562 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3563 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3564 	} else {
3565 		int i;
3566 
3567 		/* for data frames, use RS table */
3568 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3569 		/* XXX pass pktlen */
3570 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3571 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3572 		ridx = in->in_ridx[i];
3573 
3574 		/* This is the index into the programmed table */
3575 		tx->initial_rate_index = i;
3576 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3577 
3578 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3579 		    "%s: start with i=%d, txrate %d\n",
3580 		    __func__, i, iwm_rates[ridx].rate);
3581 	}
3582 
3583 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3584 	    "%s: frame type=%d txrate %d\n",
3585 	        __func__, type, iwm_rates[ridx].rate);
3586 
3587 	rinfo = &iwm_rates[ridx];
3588 
3589 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3590 	    __func__, ridx,
3591 	    rinfo->rate,
3592 	    !! (IWM_RIDX_IS_CCK(ridx))
3593 	    );
3594 
3595 	/* XXX TODO: hard-coded TX antenna? */
3596 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3597 	if (IWM_RIDX_IS_CCK(ridx))
3598 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3599 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3600 
3601 	return rinfo;
3602 }
3603 
3604 #define TB0_SIZE 16
3605 static int
3606 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3607 {
3608 	struct ieee80211com *ic = &sc->sc_ic;
3609 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3610 	struct iwm_node *in = IWM_NODE(ni);
3611 	struct iwm_tx_ring *ring;
3612 	struct iwm_tx_data *data;
3613 	struct iwm_tfd *desc;
3614 	struct iwm_device_cmd *cmd;
3615 	struct iwm_tx_cmd *tx;
3616 	struct ieee80211_frame *wh;
3617 	struct ieee80211_key *k = NULL;
3618 	struct mbuf *m1;
3619 	const struct iwm_rate *rinfo;
3620 	uint32_t flags;
3621 	u_int hdrlen;
3622 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3623 	int nsegs;
3624 	uint8_t tid, type;
3625 	int i, totlen, error, pad;
3626 
3627 	wh = mtod(m, struct ieee80211_frame *);
3628 	hdrlen = ieee80211_anyhdrsize(wh);
3629 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3630 	tid = 0;
3631 	ring = &sc->txq[ac];
3632 	desc = &ring->desc[ring->cur];
3633 	memset(desc, 0, sizeof(*desc));
3634 	data = &ring->data[ring->cur];
3635 
3636 	/* Fill out iwm_tx_cmd to send to the firmware */
3637 	cmd = &ring->cmd[ring->cur];
3638 	cmd->hdr.code = IWM_TX_CMD;
3639 	cmd->hdr.flags = 0;
3640 	cmd->hdr.qid = ring->qid;
3641 	cmd->hdr.idx = ring->cur;
3642 
3643 	tx = (void *)cmd->data;
3644 	memset(tx, 0, sizeof(*tx));
3645 
3646 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3647 
3648 	/* Encrypt the frame if need be. */
3649 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3650 		/* Retrieve key for TX && do software encryption. */
3651 		k = ieee80211_crypto_encap(ni, m);
3652 		if (k == NULL) {
3653 			m_freem(m);
3654 			return (ENOBUFS);
3655 		}
3656 		/* 802.11 header may have moved. */
3657 		wh = mtod(m, struct ieee80211_frame *);
3658 	}
3659 
3660 	if (ieee80211_radiotap_active_vap(vap)) {
3661 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3662 
3663 		tap->wt_flags = 0;
3664 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3665 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3666 		tap->wt_rate = rinfo->rate;
3667 		if (k != NULL)
3668 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3669 		ieee80211_radiotap_tx(vap, m);
3670 	}
3671 
3672 
3673 	totlen = m->m_pkthdr.len;
3674 
3675 	flags = 0;
3676 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3677 		flags |= IWM_TX_CMD_FLG_ACK;
3678 	}
3679 
3680 	if (type == IEEE80211_FC0_TYPE_DATA
3681 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3682 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3683 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3684 	}
3685 
3686 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3687 	    type != IEEE80211_FC0_TYPE_DATA)
3688 		tx->sta_id = sc->sc_aux_sta.sta_id;
3689 	else
3690 		tx->sta_id = IWM_STATION_ID;
3691 
3692 	if (type == IEEE80211_FC0_TYPE_MGT) {
3693 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3694 
3695 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3696 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3697 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3698 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3699 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3700 		} else {
3701 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3702 		}
3703 	} else {
3704 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3705 	}
3706 
3707 	if (hdrlen & 3) {
3708 		/* First segment length must be a multiple of 4. */
3709 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3710 		pad = 4 - (hdrlen & 3);
3711 	} else
3712 		pad = 0;
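	/*
	 * e.g. a 26-byte QoS data header yields pad = 2; the pad bytes
	 * are accounted for in the TB1 length when the TX descriptor is
	 * filled in below.
	 */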
3713 
3714 	tx->driver_txop = 0;
3715 	tx->next_frame_len = 0;
3716 
3717 	tx->len = htole16(totlen);
3718 	tx->tid_tspec = tid;
3719 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3720 
3721 	/* Set physical address of "scratch area". */
3722 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3723 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3724 
3725 	/* Copy 802.11 header in TX command. */
3726 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3727 
3728 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3729 
3730 	tx->sec_ctl = 0;
3731 	tx->tx_flags |= htole32(flags);
3732 
3733 	/* Trim 802.11 header. */
3734 	m_adj(m, hdrlen);
3735 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3736 	    segs, &nsegs, BUS_DMA_NOWAIT);
3737 	if (error != 0) {
3738 		if (error != EFBIG) {
3739 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3740 			    error);
3741 			m_freem(m);
3742 			return error;
3743 		}
3744 		/* Too many DMA segments, linearize mbuf. */
3745 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3746 		if (m1 == NULL) {
3747 			device_printf(sc->sc_dev,
3748 			    "%s: could not defrag mbuf\n", __func__);
3749 			m_freem(m);
3750 			return (ENOBUFS);
3751 		}
3752 		m = m1;
3753 
3754 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3755 		    segs, &nsegs, BUS_DMA_NOWAIT);
3756 		if (error != 0) {
3757 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3758 			    error);
3759 			m_freem(m);
3760 			return error;
3761 		}
3762 	}
3763 	data->m = m;
3764 	data->in = in;
3765 	data->done = 0;
3766 
3767 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3768 	    "sending txd %p, in %p\n", data, data->in);
3769 	KASSERT(data->in != NULL, ("node is NULL"));
3770 
3771 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3772 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3773 	    ring->qid, ring->cur, totlen, nsegs,
3774 	    le32toh(tx->tx_flags),
3775 	    le32toh(tx->rate_n_flags),
3776 	    tx->initial_rate_index
3777 	    );
3778 
3779 	/* Fill TX descriptor. */
3780 	desc->num_tbs = 2 + nsegs;
3781 
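	/*
	 * TB0 covers the first TB0_SIZE (16) bytes of the command; TB1
	 * covers the rest of the command header, the TX command and the
	 * (padded) 802.11 header.  Each hi_n_len packs the upper bits of
	 * the DMA address in its low nibble and the buffer length in the
	 * remaining 12 bits, hence the "<< 4" shifts.
	 */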
3782 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3783 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3784 	    (TB0_SIZE << 4);
3785 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3786 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3787 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3788 	      + hdrlen + pad - TB0_SIZE) << 4);
3789 
3790 	/* Other DMA segments are for data payload. */
3791 	for (i = 0; i < nsegs; i++) {
3792 		seg = &segs[i];
3793 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3794 		desc->tbs[i+2].hi_n_len =
3795 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3796 		    | ((seg->ds_len) << 4);
3797 	}
3798 
3799 	bus_dmamap_sync(ring->data_dmat, data->map,
3800 	    BUS_DMASYNC_PREWRITE);
3801 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3802 	    BUS_DMASYNC_PREWRITE);
3803 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3804 	    BUS_DMASYNC_PREWRITE);
3805 
3806 #if 0
3807 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3808 #endif
3809 
3810 	/* Kick TX ring. */
3811 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3812 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3813 
3814 	/* Mark TX ring as full if we reach a certain threshold. */
3815 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3816 		sc->qfullmsk |= 1 << ring->qid;
3817 	}
3818 
3819 	return 0;
3820 }
3821 
3822 static int
3823 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3824     const struct ieee80211_bpf_params *params)
3825 {
3826 	struct ieee80211com *ic = ni->ni_ic;
3827 	struct iwm_softc *sc = ic->ic_softc;
3828 	int error = 0;
3829 
3830 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3831 	    "->%s begin\n", __func__);
3832 
3833 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3834 		m_freem(m);
3835 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3836 		    "<-%s not RUNNING\n", __func__);
3837 		return (ENETDOWN);
3838 	}
3839 
3840 	IWM_LOCK(sc);
3841 	/* XXX fix this */
3842 	if (params == NULL) {
3843 		error = iwm_tx(sc, m, ni, 0);
3844 	} else {
3845 		error = iwm_tx(sc, m, ni, 0);
3846 	}
3847 	sc->sc_tx_timer = 5;
3848 	IWM_UNLOCK(sc);
3849 
3850 	return (error);
3851 }
3852 
3853 /*
3854  * mvm/tx.c
3855  */
3856 
3857 /*
3858  * Note that there are transports that buffer frames before they reach
3859  * the firmware. This means that after flush_tx_path is called, the
3860  * queue might not be empty. The race-free way to handle this is to:
3861  * 1) set the station as draining
3862  * 2) flush the Tx path
3863  * 3) wait for the transport queues to be empty
3864  */
3865 int
3866 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3867 {
3868 	int ret;
3869 	struct iwm_tx_path_flush_cmd flush_cmd = {
3870 		.queues_ctl = htole32(tfd_msk),
3871 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3872 	};
3873 
3874 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3875 	    sizeof(flush_cmd), &flush_cmd);
3876 	if (ret)
3877 		device_printf(sc->sc_dev,
3878 		    "Flushing tx queue failed: %d\n", ret);
3879 	return ret;
3880 }
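
/*
 * A sketch of the drain sequence described above; step 1 (marking the
 * station as draining) is not implemented in this driver, and step 3
 * is only stubbed out (see iwm_release()):
 *
 *	iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
 *	iwm_trans_wait_tx_queue_empty(sc, 0xf);
 */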
3881 
3882 /*
3883  * BEGIN mvm/sta.c
3884  */
3885 
3886 static int
3887 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3888 	struct iwm_mvm_add_sta_cmd *cmd, int *status)
3889 {
3890 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3891 	    cmd, status);
3892 }
3893 
3894 /* send station add/update command to firmware */
3895 static int
3896 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3897 {
3898 	struct iwm_mvm_add_sta_cmd add_sta_cmd;
3899 	int ret;
3900 	uint32_t status;
3901 
3902 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3903 
3904 	add_sta_cmd.sta_id = IWM_STATION_ID;
3905 	add_sta_cmd.mac_id_n_color
3906 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3907 	        IWM_DEFAULT_COLOR));
3908 	if (!update) {
3909 		int ac;
3910 		for (ac = 0; ac < WME_NUM_AC; ac++) {
3911 			add_sta_cmd.tfd_queue_msk |=
3912 			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3913 		}
3914 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3915 	}
3916 	add_sta_cmd.add_modify = update ? 1 : 0;
3917 	add_sta_cmd.station_flags_msk
3918 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3919 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3920 	if (update)
3921 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3922 
3923 	status = IWM_ADD_STA_SUCCESS;
3924 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3925 	if (ret)
3926 		return ret;
3927 
3928 	switch (status) {
3929 	case IWM_ADD_STA_SUCCESS:
3930 		break;
3931 	default:
3932 		ret = EIO;
3933 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3934 		break;
3935 	}
3936 
3937 	return ret;
3938 }
3939 
3940 static int
3941 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3942 {
3943 	return iwm_mvm_sta_send_to_fw(sc, in, 0);
3944 }
3945 
3946 static int
3947 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3948 {
3949 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
3950 }
3951 
3952 static int
3953 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3954 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3955 {
3956 	struct iwm_mvm_add_sta_cmd cmd;
3957 	int ret;
3958 	uint32_t status;
3959 
3960 	memset(&cmd, 0, sizeof(cmd));
3961 	cmd.sta_id = sta->sta_id;
3962 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3963 
3964 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3965 	cmd.tid_disable_tx = htole16(0xffff);
3966 
3967 	if (addr)
3968 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3969 
3970 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3971 	if (ret)
3972 		return ret;
3973 
3974 	switch (status) {
3975 	case IWM_ADD_STA_SUCCESS:
3976 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3977 		    "%s: Internal station added.\n", __func__);
3978 		return 0;
3979 	default:
3980 		device_printf(sc->sc_dev,
3981 		    "%s: Add internal station failed, status=0x%x\n",
3982 		    __func__, status);
3983 		ret = EIO;
3984 		break;
3985 	}
3986 	return ret;
3987 }
3988 
3989 static int
3990 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3991 {
3992 	int ret;
3993 
3994 	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3995 	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3996 
3997 	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3998 	if (ret)
3999 		return ret;
4000 
4001 	ret = iwm_mvm_add_int_sta_common(sc,
4002 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4003 
4004 	if (ret)
4005 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4006 	return ret;
4007 }
4008 
4009 /*
4010  * END mvm/sta.c
4011  */
4012 
4013 /*
4014  * BEGIN mvm/quota.c
4015  */
4016 
4017 static int
4018 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4019 {
4020 	struct iwm_time_quota_cmd cmd;
4021 	int i, idx, ret, num_active_macs, quota, quota_rem;
4022 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4023 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4024 	uint16_t id;
4025 
4026 	memset(&cmd, 0, sizeof(cmd));
4027 
4028 	/* currently, PHY ID == binding ID */
4029 	if (ivp) {
4030 		id = ivp->phy_ctxt->id;
4031 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4032 		colors[id] = ivp->phy_ctxt->color;
4033 
4034 		if (1)
4035 			n_ifs[id] = 1;
4036 	}
4037 
4038 	/*
4039 	 * The FW's scheduling session consists of
4040 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4041 	 * equally between all the bindings that require quota
4042 	 */
4043 	num_active_macs = 0;
4044 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4045 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4046 		num_active_macs += n_ifs[i];
4047 	}
4048 
4049 	quota = 0;
4050 	quota_rem = 0;
4051 	if (num_active_macs) {
4052 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4053 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4054 	}
4055 
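	/*
	 * e.g. with a single active MAC the one binding gets the whole
	 * IWM_MVM_MAX_QUOTA; with three, each gets a third and the
	 * remainder is folded into the first binding below.
	 */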
4056 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4057 		if (colors[i] < 0)
4058 			continue;
4059 
4060 		cmd.quotas[idx].id_and_color =
4061 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4062 
4063 		if (n_ifs[i] <= 0) {
4064 			cmd.quotas[idx].quota = htole32(0);
4065 			cmd.quotas[idx].max_duration = htole32(0);
4066 		} else {
4067 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4068 			cmd.quotas[idx].max_duration = htole32(0);
4069 		}
4070 		idx++;
4071 	}
4072 
4073 	/* Give the remainder of the session to the first binding */
4074 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4075 
4076 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4077 	    sizeof(cmd), &cmd);
4078 	if (ret)
4079 		device_printf(sc->sc_dev,
4080 		    "%s: Failed to send quota: %d\n", __func__, ret);
4081 	return ret;
4082 }
4083 
4084 /*
4085  * END mvm/quota.c
4086  */
4087 
4088 /*
4089  * ieee80211 routines
4090  */
4091 
4092 /*
4093  * Change to AUTH state in 80211 state machine.  Roughly matches what
4094  * Linux does in bss_info_changed().
4095  */
4096 static int
4097 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4098 {
4099 	struct ieee80211_node *ni;
4100 	struct iwm_node *in;
4101 	struct iwm_vap *iv = IWM_VAP(vap);
4102 	uint32_t duration;
4103 	int error;
4104 
4105 	/*
4106 	 * XXX I have a feeling that the vap node is being
4107 	 * freed from underneath us. Grr.
4108 	 */
4109 	ni = ieee80211_ref_node(vap->iv_bss);
4110 	in = IWM_NODE(ni);
4111 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4112 	    "%s: called; vap=%p, bss ni=%p\n",
4113 	    __func__,
4114 	    vap,
4115 	    ni);
4116 
4117 	in->in_assoc = 0;
4118 
4119 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4120 	if (error != 0)
4121 		return error;
4122 
4123 	error = iwm_allow_mcast(vap, sc);
4124 	if (error) {
4125 		device_printf(sc->sc_dev,
4126 		    "%s: failed to set multicast\n", __func__);
4127 		goto out;
4128 	}
4129 
4130 	/*
4131 	 * This is where it deviates from what Linux does.
4132 	 *
4133 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4134 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4135 	 * and always does a mac_ctx_changed().
4136 	 *
4137 	 * The openbsd port doesn't attempt to do that - it reset things
4138 	 * at odd states and does the add here.
4139 	 *
4140 	 * So, until the state handling is fixed (ie, we never reset
4141 	 * the NIC except for a firmware failure, which should drag
4142 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4143 	 * contexts that are required), let's do a dirty hack here.
4144 	 */
4145 	if (iv->is_uploaded) {
4146 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4147 			device_printf(sc->sc_dev,
4148 			    "%s: failed to update MAC\n", __func__);
4149 			goto out;
4150 		}
4151 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4152 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4153 			device_printf(sc->sc_dev,
4154 			    "%s: failed update phy ctxt\n", __func__);
4155 			goto out;
4156 		}
4157 		iv->phy_ctxt = &sc->sc_phyctxt[0];
4158 
4159 		if ((error = iwm_mvm_binding_update(sc, iv)) != 0) {
4160 			device_printf(sc->sc_dev,
4161 			    "%s: binding update cmd\n", __func__);
4162 			goto out;
4163 		}
4164 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4165 			device_printf(sc->sc_dev,
4166 			    "%s: failed to update sta\n", __func__);
4167 			goto out;
4168 		}
4169 	} else {
4170 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4171 			device_printf(sc->sc_dev,
4172 			    "%s: failed to add MAC\n", __func__);
4173 			goto out;
4174 		}
4175 		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4176 			device_printf(sc->sc_dev,
4177 			    "%s: failed to update power management\n",
4178 			    __func__);
4179 			goto out;
4180 		}
4181 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4182 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4183 			device_printf(sc->sc_dev,
4184 			    "%s: failed add phy ctxt!\n", __func__);
4185 			error = ETIMEDOUT;
4186 			goto out;
4187 		}
4188 		iv->phy_ctxt = &sc->sc_phyctxt[0];
4189 
4190 		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4191 			device_printf(sc->sc_dev,
4192 			    "%s: binding add cmd\n", __func__);
4193 			goto out;
4194 		}
4195 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4196 			device_printf(sc->sc_dev,
4197 			    "%s: failed to add sta\n", __func__);
4198 			goto out;
4199 		}
4200 	}
4201 
4202 	/*
4203 	 * Prevent the FW from wandering off channel during association
4204 	 * by "protecting" the session with a time event.
4205 	 */
4206 	/* XXX duration is in units of TU, not MS */
4207 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4208 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4209 	DELAY(100);
4210 
4211 	error = 0;
4212 out:
4213 	ieee80211_free_node(ni);
4214 	return (error);
4215 }
4216 
4217 static int
4218 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4219 {
4220 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4221 	int error;
4222 
4223 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4224 		device_printf(sc->sc_dev,
4225 		    "%s: failed to update STA\n", __func__);
4226 		return error;
4227 	}
4228 
4229 	in->in_assoc = 1;
4230 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4231 		device_printf(sc->sc_dev,
4232 		    "%s: failed to update MAC\n", __func__);
4233 		return error;
4234 	}
4235 
4236 	return 0;
4237 }
4238 
4239 static int
4240 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4241 {
4242 	uint32_t tfd_msk;
4243 
4244 	/*
4245 	 * Ok, so *technically* the proper set of calls for going
4246 	 * from RUN back to SCAN is:
4247 	 *
4248 	 * iwm_mvm_power_mac_disable(sc, in);
4249 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4250 	 * iwm_mvm_rm_sta(sc, in);
4251 	 * iwm_mvm_update_quotas(sc, NULL);
4252 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4253 	 * iwm_mvm_binding_remove_vif(sc, in);
4254 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4255 	 *
4256 	 * However, that freezes the device no matter which permutations
4257 	 * and modifications are attempted.  Obviously, this driver is missing
4258 	 * something since it works in the Linux driver, but figuring out what
4259 	 * is missing is a little more complicated.  Now, since we're going
4260 	 * back to nothing anyway, we'll just do a complete device reset.
4261 	 * Up yours, device!
4262 	 */
4263 	/*
4264 	 * Just using 0xf for the queues mask is fine as long as we only
4265 	 * get here from RUN state.
4266 	 */
4267 	tfd_msk = 0xf;
4268 	mbufq_drain(&sc->sc_snd);
4269 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4270 	/*
4271 	 * We seem to get away with just synchronously sending the
4272 	 * IWM_TXPATH_FLUSH command.
4273 	 */
4274 //	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4275 	iwm_stop_device(sc);
4276 	iwm_init_hw(sc);
4277 	if (in)
4278 		in->in_assoc = 0;
4279 	return 0;
4280 
4281 #if 0
4282 	int error;
4283 
4284 	iwm_mvm_power_mac_disable(sc, in);
4285 
4286 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4287 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4288 		return error;
4289 	}
4290 
4291 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4292 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4293 		return error;
4294 	}
4295 	error = iwm_mvm_rm_sta(sc, in);
4296 	in->in_assoc = 0;
4297 	iwm_mvm_update_quotas(sc, NULL);
4298 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4299 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4300 		return error;
4301 	}
4302 	iwm_mvm_binding_remove_vif(sc, in);
4303 
4304 	iwm_mvm_mac_ctxt_remove(sc, in);
4305 
4306 	return error;
4307 #endif
4308 }
4309 
4310 static struct ieee80211_node *
4311 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4312 {
4313 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4314 	    M_NOWAIT | M_ZERO);
4315 }
4316 
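/*
 * Map a hardware rate index back into the node's rate set.  The raw
 * rs_rates[] entry is returned, so the basic-rate flag is preserved.
 */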
4317 uint8_t
4318 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4319 {
4320 	int i;
4321 	uint8_t rval;
4322 
4323 	for (i = 0; i < rs->rs_nrates; i++) {
4324 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4325 		if (rval == iwm_rates[ridx].rate)
4326 			return rs->rs_rates[i];
4327 	}
4328 
4329 	return 0;
4330 }
4331 
4332 static void
4333 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4334 {
4335 	struct ieee80211_node *ni = &in->in_ni;
4336 	struct iwm_lq_cmd *lq = &in->in_lq;
4337 	int nrates = ni->ni_rates.rs_nrates;
4338 	int i, ridx, tab = 0;
4339 //	int txant = 0;
4340 
4341 	if (nrates > nitems(lq->rs_table)) {
4342 		device_printf(sc->sc_dev,
4343 		    "%s: node supports %d rates, driver handles "
4344 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4345 		return;
4346 	}
4347 	if (nrates == 0) {
4348 		device_printf(sc->sc_dev,
4349 		    "%s: node supports 0 rates, odd!\n", __func__);
4350 		return;
4351 	}
4352 
4353 	/*
4354 	 * XXX .. and most of iwm_node is not initialised explicitly;
4355 	 * it's all just 0x0 passed to the firmware.
4356 	 */
4357 
4358 	/* first figure out which rates we should support */
4359 	/* XXX TODO: this isn't 11n aware /at all/ */
4360 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4361 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4362 	    "%s: nrates=%d\n", __func__, nrates);
4363 
4364 	/*
4365 	 * Loop over nrates and populate in_ridx from the highest
4366 	 * rate to the lowest rate.  Remember, in_ridx[] has
4367 	 * IEEE80211_RATE_MAXSIZE entries!
4368 	 */
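	/*
	 * e.g. a node advertising {1, 2, 5.5, 11} Mbps ends up with
	 * in_ridx[0] mapping 11 Mbps down to in_ridx[3] mapping 1 Mbps.
	 */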
4369 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4370 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4371 
4372 		/* Map 802.11 rate to HW rate index. */
4373 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4374 			if (iwm_rates[ridx].rate == rate)
4375 				break;
4376 		if (ridx > IWM_RIDX_MAX) {
4377 			device_printf(sc->sc_dev,
4378 			    "%s: WARNING: device rate for %d not found!\n",
4379 			    __func__, rate);
4380 		} else {
4381 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4382 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4383 			    __func__,
4384 			    i,
4385 			    rate,
4386 			    ridx);
4387 			in->in_ridx[i] = ridx;
4388 		}
4389 	}
4390 
4391 	/* then construct a lq_cmd based on those */
4392 	memset(lq, 0, sizeof(*lq));
4393 	lq->sta_id = IWM_STATION_ID;
4394 
4395 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4396 	if (ni->ni_flags & IEEE80211_NODE_HT)
4397 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4398 
4399 	/*
4400 	 * are these used? (we don't do SISO or MIMO)
4401 	 * need to set them to non-zero, though, or we get an error.
4402 	 */
4403 	lq->single_stream_ant_msk = 1;
4404 	lq->dual_stream_ant_msk = 1;
4405 
4406 	/*
4407 	 * Build the actual rate selection table.
4408 	 * The lowest bits are the rates.  Additionally,
4409 	 * CCK needs bit 9 to be set.  The rest of the bits
4410 	 * we add to the table select the tx antenna
4411 	 * Note that we add the rates in the highest rate first
4412 	 * (opposite of ni_rates).
4413 	 */
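	/*
	 * For example, a CCK entry comes out as
	 * plcp | (ant << IWM_RATE_MCS_ANT_POS) | IWM_RATE_MCS_CCK_MSK.
	 */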
4414 	/*
4415 	 * XXX TODO: this should be looping over the min of nrates
4416 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4417 	 */
4418 	for (i = 0; i < nrates; i++) {
4419 		int nextant;
4420 
4421 #if 0
4422 		if (txant == 0)
4423 			txant = iwm_mvm_get_valid_tx_ant(sc);
4424 		nextant = 1<<(ffs(txant)-1);
4425 		txant &= ~nextant;
4426 #else
4427 		nextant = iwm_mvm_get_valid_tx_ant(sc);
4428 #endif
4429 		/*
4430 		 * Map the rate id into a rate index into
4431 		 * our hardware table containing the
4432 		 * configuration to use for this rate.
4433 		 */
4434 		ridx = in->in_ridx[i];
4435 		tab = iwm_rates[ridx].plcp;
4436 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4437 		if (IWM_RIDX_IS_CCK(ridx))
4438 			tab |= IWM_RATE_MCS_CCK_MSK;
4439 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4440 		    "station rate i=%d, rate=%d, hw=%x\n",
4441 		    i, iwm_rates[ridx].rate, tab);
4442 		lq->rs_table[i] = htole32(tab);
4443 	}
4444 	/* then fill the rest with the lowest possible rate */
4445 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4446 		KASSERT(tab != 0, ("invalid tab"));
4447 		lq->rs_table[i] = htole32(tab);
4448 	}
4449 }
4450 
4451 static int
4452 iwm_media_change(struct ifnet *ifp)
4453 {
4454 	struct ieee80211vap *vap = ifp->if_softc;
4455 	struct ieee80211com *ic = vap->iv_ic;
4456 	struct iwm_softc *sc = ic->ic_softc;
4457 	int error;
4458 
4459 	error = ieee80211_media_change(ifp);
4460 	if (error != ENETRESET)
4461 		return error;
4462 
4463 	IWM_LOCK(sc);
4464 	if (ic->ic_nrunning > 0) {
4465 		iwm_stop(sc);
4466 		iwm_init(sc);
4467 	}
4468 	IWM_UNLOCK(sc);
4469 	return error;
4470 }
4471 
4472 
4473 static int
4474 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4475 {
4476 	struct iwm_vap *ivp = IWM_VAP(vap);
4477 	struct ieee80211com *ic = vap->iv_ic;
4478 	struct iwm_softc *sc = ic->ic_softc;
4479 	struct iwm_node *in;
4480 	int error;
4481 
4482 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4483 	    "switching state %s -> %s\n",
4484 	    ieee80211_state_name[vap->iv_state],
4485 	    ieee80211_state_name[nstate]);
4486 	IEEE80211_UNLOCK(ic);
4487 	IWM_LOCK(sc);
4488 
4489 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4490 		iwm_led_blink_stop(sc);
4491 
4492 	/* disable beacon filtering if we're hopping out of RUN */
4493 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4494 		iwm_mvm_disable_beacon_filter(sc);
4495 
4496 		if ((in = IWM_NODE(vap->iv_bss)) != NULL)
4497 			in->in_assoc = 0;
4498 
4499 		if (nstate == IEEE80211_S_INIT) {
4500 			IWM_UNLOCK(sc);
4501 			IEEE80211_LOCK(ic);
4502 			error = ivp->iv_newstate(vap, nstate, arg);
4503 			IEEE80211_UNLOCK(ic);
4504 			IWM_LOCK(sc);
4505 			iwm_release(sc, NULL);
4506 			IWM_UNLOCK(sc);
4507 			IEEE80211_LOCK(ic);
4508 			return error;
4509 		}
4510 
4511 		/*
4512 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4513 		 * above then the card will be completely reinitialized,
4514 		 * so the driver must do everything necessary to bring the card
4515 		 * from INIT to SCAN.
4516 		 *
4517 		 * Additionally, upon receiving deauth frame from AP,
4518 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4519 		 * state. This will also fail with this driver, so bring the FSM
4520 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4521 		 *
4522 		 * XXX TODO: fix this for FreeBSD!
4523 		 */
4524 		if (nstate == IEEE80211_S_SCAN ||
4525 		    nstate == IEEE80211_S_AUTH ||
4526 		    nstate == IEEE80211_S_ASSOC) {
4527 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4528 			    "Force transition to INIT; MGT=%d\n", arg);
4529 			IWM_UNLOCK(sc);
4530 			IEEE80211_LOCK(ic);
4531 			/* Always pass arg as -1 since we can't Tx right now. */
4532 			/*
4533 			 * XXX arg is just ignored anyway when transitioning
4534 			 *     to IEEE80211_S_INIT.
4535 			 */
4536 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4537 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4538 			    "Going INIT->SCAN\n");
4539 			nstate = IEEE80211_S_SCAN;
4540 			IEEE80211_UNLOCK(ic);
4541 			IWM_LOCK(sc);
4542 		}
4543 	}
4544 
4545 	switch (nstate) {
4546 	case IEEE80211_S_INIT:
4547 		break;
4548 
4549 	case IEEE80211_S_AUTH:
4550 		if ((error = iwm_auth(vap, sc)) != 0) {
4551 			device_printf(sc->sc_dev,
4552 			    "%s: could not move to auth state: %d\n",
4553 			    __func__, error);
4554 			break;
4555 		}
4556 		break;
4557 
4558 	case IEEE80211_S_ASSOC:
4559 		/*
4560 		 * EBS may be disabled due to previous failures reported by FW.
4561 		 * Reset EBS status here assuming environment has been changed.
4562 		 */
4563 		sc->last_ebs_successful = TRUE;
4564 		if ((error = iwm_assoc(vap, sc)) != 0) {
4565 			device_printf(sc->sc_dev,
4566 			    "%s: failed to associate: %d\n", __func__,
4567 			    error);
4568 			break;
4569 		}
4570 		break;
4571 
4572 	case IEEE80211_S_RUN:
4573 	{
4574 		struct iwm_host_cmd cmd = {
4575 			.id = IWM_LQ_CMD,
4576 			.len = { sizeof(in->in_lq), },
4577 			.flags = IWM_CMD_SYNC,
4578 		};
4579 
4580 		/* Update the association state, now we have it all */
4581 		/* (e.g. the associd comes in at this point). */
4582 		error = iwm_assoc(vap, sc);
4583 		if (error != 0) {
4584 			device_printf(sc->sc_dev,
4585 			    "%s: failed to update association state: %d\n",
4586 			    __func__,
4587 			    error);
4588 			break;
4589 		}
4590 
4591 		in = IWM_NODE(vap->iv_bss);
4592 		iwm_mvm_enable_beacon_filter(sc, in);
4593 		iwm_mvm_power_update_mac(sc);
4594 		iwm_mvm_update_quotas(sc, ivp);
4595 		iwm_setrates(sc, in);
4596 
4597 		cmd.data[0] = &in->in_lq;
4598 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4599 			device_printf(sc->sc_dev,
4600 			    "%s: IWM_LQ_CMD failed\n", __func__);
4601 		}
4602 
4603 		iwm_mvm_led_enable(sc);
4604 		break;
4605 	}
4606 
4607 	default:
4608 		break;
4609 	}
4610 	IWM_UNLOCK(sc);
4611 	IEEE80211_LOCK(ic);
4612 
4613 	return (ivp->iv_newstate(vap, nstate, arg));
4614 }
4615 
4616 void
4617 iwm_endscan_cb(void *arg, int pending)
4618 {
4619 	struct iwm_softc *sc = arg;
4620 	struct ieee80211com *ic = &sc->sc_ic;
4621 
4622 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4623 	    "%s: scan ended\n",
4624 	    __func__);
4625 
4626 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4627 }
4628 
4629 /*
4630  * Aging and idle timeouts for the different possible scenarios
4631  * in default configuration
4632  */
4633 static const uint32_t
4634 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4635 	{
4636 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4637 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4638 	},
4639 	{
4640 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4641 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4642 	},
4643 	{
4644 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4645 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4646 	},
4647 	{
4648 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4649 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4650 	},
4651 	{
4652 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4653 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4654 	},
4655 };
4656 
4657 /*
4658  * Aging and idle timeouts for the different possible scenarios
4659  * in single BSS MAC configuration.
4660  */
4661 static const uint32_t
4662 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4663 	{
4664 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4665 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4666 	},
4667 	{
4668 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4669 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4670 	},
4671 	{
4672 		htole32(IWM_SF_MCAST_AGING_TIMER),
4673 		htole32(IWM_SF_MCAST_IDLE_TIMER)
4674 	},
4675 	{
4676 		htole32(IWM_SF_BA_AGING_TIMER),
4677 		htole32(IWM_SF_BA_IDLE_TIMER)
4678 	},
4679 	{
4680 		htole32(IWM_SF_TX_RE_AGING_TIMER),
4681 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4682 	},
4683 };
4684 
4685 static void
4686 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4687     struct ieee80211_node *ni)
4688 {
4689 	int i, j, watermark;
4690 
4691 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4692 
4693 	/*
4694 	 * If we are in association flow - check antenna configuration
4695 	 * capabilities of the AP station, and choose the watermark accordingly.
4696 	 */
4697 	if (ni) {
4698 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4699 #ifdef notyet
4700 			if (ni->ni_rxmcs[2] != 0)
4701 				watermark = IWM_SF_W_MARK_MIMO3;
4702 			else if (ni->ni_rxmcs[1] != 0)
4703 				watermark = IWM_SF_W_MARK_MIMO2;
4704 			else
4705 #endif
4706 				watermark = IWM_SF_W_MARK_SISO;
4707 		} else {
4708 			watermark = IWM_SF_W_MARK_LEGACY;
4709 		}
4710 	/* default watermark value for unassociated mode. */
4711 	} else {
4712 		watermark = IWM_SF_W_MARK_MIMO2;
4713 	}
4714 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4715 
4716 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4717 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4718 			sf_cmd->long_delay_timeouts[i][j] =
4719 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4720 		}
4721 	}
4722 
4723 	if (ni) {
4724 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4725 		       sizeof(iwm_sf_full_timeout));
4726 	} else {
4727 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4728 		       sizeof(iwm_sf_full_timeout_def));
4729 	}
4730 }
4731 
4732 static int
4733 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4734 {
4735 	struct ieee80211com *ic = &sc->sc_ic;
4736 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4737 	struct iwm_sf_cfg_cmd sf_cmd = {
4738 		.state = htole32(IWM_SF_FULL_ON),
4739 	};
4740 	int ret = 0;
4741 
4742 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4743 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4744 
4745 	switch (new_state) {
4746 	case IWM_SF_UNINIT:
4747 	case IWM_SF_INIT_OFF:
4748 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4749 		break;
4750 	case IWM_SF_FULL_ON:
4751 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4752 		break;
4753 	default:
4754 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4755 		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4756 			  new_state);
4757 		return EINVAL;
4758 	}
4759 
4760 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4761 				   sizeof(sf_cmd), &sf_cmd);
4762 	return ret;
4763 }
4764 
4765 static int
4766 iwm_send_bt_init_conf(struct iwm_softc *sc)
4767 {
4768 	struct iwm_bt_coex_cmd bt_cmd;
4769 
4770 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4771 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4772 
4773 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4774 	    &bt_cmd);
4775 }
4776 
4777 static int
4778 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4779 {
4780 	struct iwm_mcc_update_cmd mcc_cmd;
4781 	struct iwm_host_cmd hcmd = {
4782 		.id = IWM_MCC_UPDATE_CMD,
4783 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4784 		.data = { &mcc_cmd },
4785 	};
4786 	int ret;
4787 #ifdef IWM_DEBUG
4788 	struct iwm_rx_packet *pkt;
4789 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4790 	struct iwm_mcc_update_resp *mcc_resp;
4791 	int n_channels;
4792 	uint16_t mcc;
4793 #endif
4794 	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4795 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4796 
4797 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4798 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
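	/* e.g. alpha2 "ZZ" encodes as 0x5a5a; "00" is the world domain. */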
4799 	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4800 	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4801 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4802 	else
4803 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4804 
4805 	if (resp_v2)
4806 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4807 	else
4808 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4809 
4810 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4811 	    "send MCC update to FW with '%c%c' src = %d\n",
4812 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4813 
4814 	ret = iwm_send_cmd(sc, &hcmd);
4815 	if (ret)
4816 		return ret;
4817 
4818 #ifdef IWM_DEBUG
4819 	pkt = hcmd.resp_pkt;
4820 
4821 	/* Extract MCC response */
4822 	if (resp_v2) {
4823 		mcc_resp = (void *)pkt->data;
4824 		mcc = mcc_resp->mcc;
4825 		n_channels = le32toh(mcc_resp->n_channels);
4826 	} else {
4827 		mcc_resp_v1 = (void *)pkt->data;
4828 		mcc = mcc_resp_v1->mcc;
4829 		n_channels = le32toh(mcc_resp_v1->n_channels);
4830 	}
4831 
4832 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4833 	if (mcc == 0)
4834 		mcc = 0x3030;  /* "00" - world */
4835 
4836 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4837 	    "regulatory domain '%c%c' (%d channels available)\n",
4838 	    mcc >> 8, mcc & 0xff, n_channels);
4839 #endif
4840 	iwm_free_resp(sc, &hcmd);
4841 
4842 	return 0;
4843 }
4844 
4845 static void
4846 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4847 {
4848 	struct iwm_host_cmd cmd = {
4849 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4850 		.len = { sizeof(uint32_t), },
4851 		.data = { &backoff, },
4852 	};
4853 
4854 	if (iwm_send_cmd(sc, &cmd) != 0) {
4855 		device_printf(sc->sc_dev,
4856 		    "failed to change thermal tx backoff\n");
4857 	}
4858 }
4859 
4860 static int
4861 iwm_init_hw(struct iwm_softc *sc)
4862 {
4863 	struct ieee80211com *ic = &sc->sc_ic;
4864 	int error, i, ac;
4865 
4866 	if ((error = iwm_start_hw(sc)) != 0) {
4867 		printf("iwm_start_hw: failed %d\n", error);
4868 		return error;
4869 	}
4870 
4871 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4872 		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4873 		return error;
4874 	}
4875 
4876 	/*
4877 	 * We should stop and restart the HW since the INIT
4878 	 * image has just been loaded.
4879 	 */
4880 	iwm_stop_device(sc);
4881 	sc->sc_ps_disabled = FALSE;
4882 	if ((error = iwm_start_hw(sc)) != 0) {
4883 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4884 		return error;
4885 	}
4886 
4887 	/* restart, this time with the regular firmware */
4888 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4889 	if (error) {
4890 		device_printf(sc->sc_dev, "could not load firmware\n");
4891 		goto error;
4892 	}
4893 
4894 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4895 		device_printf(sc->sc_dev, "bt init conf failed\n");
4896 		goto error;
4897 	}
4898 
4899 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4900 	if (error != 0) {
4901 		device_printf(sc->sc_dev, "antenna config failed\n");
4902 		goto error;
4903 	}
4904 
4905 	/* Send phy db control command and then phy db calibration */
4906 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4907 		goto error;
4908 
4909 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4910 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4911 		goto error;
4912 	}
4913 
4914 	/* Add auxiliary station for scanning */
4915 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4916 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4917 		goto error;
4918 	}
4919 
4920 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4921 		/*
4922 		 * The channel used here isn't relevant as it's
4923 		 * going to be overwritten in the other flows.
4924 		 * For now use the first channel we have.
4925 		 */
4926 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4927 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4928 			goto error;
4929 	}
4930 
4931 	/* Initialize tx backoffs to the minimum. */
4932 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4933 		iwm_mvm_tt_tx_backoff(sc, 0);
4934 
4935 	error = iwm_mvm_power_update_device(sc);
4936 	if (error)
4937 		goto error;
4938 
4939 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4940 		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4941 			goto error;
4942 	}
4943 
4944 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4945 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4946 			goto error;
4947 	}
4948 
4949 	/* Enable Tx queues. */
4950 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4951 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4952 		    iwm_mvm_ac_to_tx_fifo[ac]);
4953 		if (error)
4954 			goto error;
4955 	}
4956 
4957 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4958 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4959 		goto error;
4960 	}
4961 
4962 	return 0;
4963 
4964  error:
4965 	iwm_stop_device(sc);
4966 	return error;
4967 }
4968 
4969 /* Allow multicast from our BSSID. */
4970 static int
4971 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4972 {
4973 	struct ieee80211_node *ni = vap->iv_bss;
4974 	struct iwm_mcast_filter_cmd *cmd;
4975 	size_t size;
4976 	int error;
4977 
4978 	size = roundup(sizeof(*cmd), 4);
4979 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4980 	if (cmd == NULL)
4981 		return ENOMEM;
4982 	cmd->filter_own = 1;
4983 	cmd->port_id = 0;
4984 	cmd->count = 0;
4985 	cmd->pass_all = 1;
4986 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4987 
4988 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4989 	    IWM_CMD_SYNC, size, cmd);
4990 	free(cmd, M_DEVBUF);
4991 
4992 	return (error);
4993 }
4994 
4995 /*
4996  * ifnet interfaces
4997  */
4998 
4999 static void
5000 iwm_init(struct iwm_softc *sc)
5001 {
5002 	int error;
5003 
5004 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5005 		return;
5006 	}
5007 	sc->sc_generation++;
5008 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5009 
5010 	if ((error = iwm_init_hw(sc)) != 0) {
5011 		printf("iwm_init_hw failed %d\n", error);
5012 		iwm_stop(sc);
5013 		return;
5014 	}
5015 
5016 	/*
5017 	 * Ok, firmware loaded and we are jogging
5018 	 */
5019 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5020 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5021 }
5022 
5023 static int
5024 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5025 {
5026 	struct iwm_softc *sc;
5027 	int error;
5028 
5029 	sc = ic->ic_softc;
5030 
5031 	IWM_LOCK(sc);
5032 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5033 		IWM_UNLOCK(sc);
5034 		return (ENXIO);
5035 	}
5036 	error = mbufq_enqueue(&sc->sc_snd, m);
5037 	if (error) {
5038 		IWM_UNLOCK(sc);
5039 		return (error);
5040 	}
5041 	iwm_start(sc);
5042 	IWM_UNLOCK(sc);
5043 	return (0);
5044 }
5045 
5046 /*
5047  * Dequeue packets from sendq and call send.
5048  */
5049 static void
5050 iwm_start(struct iwm_softc *sc)
5051 {
5052 	struct ieee80211_node *ni;
5053 	struct mbuf *m;
5054 	int ac = 0;
5055 
5056 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5057 	while (sc->qfullmsk == 0 &&
5058 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5059 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5060 		if (iwm_tx(sc, m, ni, ac) != 0) {
5061 			if_inc_counter(ni->ni_vap->iv_ifp,
5062 			    IFCOUNTER_OERRORS, 1);
5063 			ieee80211_free_node(ni);
5064 			continue;
5065 		}
5066 		sc->sc_tx_timer = 15;
5067 	}
5068 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5069 }
5070 
5071 static void
5072 iwm_stop(struct iwm_softc *sc)
5073 {
5074 
5075 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5076 	sc->sc_flags |= IWM_FLAG_STOPPED;
5077 	sc->sc_generation++;
5078 	iwm_led_blink_stop(sc);
5079 	sc->sc_tx_timer = 0;
5080 	iwm_stop_device(sc);
5081 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5082 }
5083 
5084 static void
5085 iwm_watchdog(void *arg)
5086 {
5087 	struct iwm_softc *sc = arg;
5088 	struct ieee80211com *ic = &sc->sc_ic;
5089 
5090 	if (sc->sc_tx_timer > 0) {
5091 		if (--sc->sc_tx_timer == 0) {
5092 			device_printf(sc->sc_dev, "device timeout\n");
5093 #ifdef IWM_DEBUG
5094 			iwm_nic_error(sc);
5095 #endif
5096 			ieee80211_restart_all(ic);
5097 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5098 			return;
5099 		}
5100 	}
5101 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5102 }
5103 
5104 static void
5105 iwm_parent(struct ieee80211com *ic)
5106 {
5107 	struct iwm_softc *sc = ic->ic_softc;
5108 	int startall = 0;
5109 
5110 	IWM_LOCK(sc);
5111 	if (ic->ic_nrunning > 0) {
5112 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5113 			iwm_init(sc);
5114 			startall = 1;
5115 		}
5116 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5117 		iwm_stop(sc);
5118 	IWM_UNLOCK(sc);
5119 	if (startall)
5120 		ieee80211_start_all(ic);
5121 }
5122 
5123 /*
5124  * The interrupt side of things
5125  */
5126 
5127 /*
5128  * error dumping routines are from iwlwifi/mvm/utils.c
5129  */
5130 
5131 /*
5132  * Note: This structure is read from the device with IO accesses,
5133  * and the reading already does the endian conversion. As it is
5134  * read with uint32_t-sized accesses, any members with a different size
5135  * need to be ordered correctly though!
5136  */
5137 struct iwm_error_event_table {
5138 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5139 	uint32_t error_id;		/* type of error */
5140 	uint32_t trm_hw_status0;	/* TRM HW status */
5141 	uint32_t trm_hw_status1;	/* TRM HW status */
5142 	uint32_t blink2;		/* branch link */
5143 	uint32_t ilink1;		/* interrupt link */
5144 	uint32_t ilink2;		/* interrupt link */
5145 	uint32_t data1;		/* error-specific data */
5146 	uint32_t data2;		/* error-specific data */
5147 	uint32_t data3;		/* error-specific data */
5148 	uint32_t bcon_time;		/* beacon timer */
5149 	uint32_t tsf_low;		/* network timestamp function timer */
5150 	uint32_t tsf_hi;		/* network timestamp function timer */
5151 	uint32_t gp1;		/* GP1 timer register */
5152 	uint32_t gp2;		/* GP2 timer register */
5153 	uint32_t fw_rev_type;	/* firmware revision type */
5154 	uint32_t major;		/* uCode version major */
5155 	uint32_t minor;		/* uCode version minor */
5156 	uint32_t hw_ver;		/* HW Silicon version */
5157 	uint32_t brd_ver;		/* HW board version */
5158 	uint32_t log_pc;		/* log program counter */
5159 	uint32_t frame_ptr;		/* frame pointer */
5160 	uint32_t stack_ptr;		/* stack pointer */
5161 	uint32_t hcmd;		/* last host command header */
5162 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5163 				 * rxtx_flag */
5164 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5165 				 * host_flag */
5166 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5167 				 * enc_flag */
5168 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5169 				 * time_flag */
5170 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5171 				 * wico interrupt */
5172 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5173 	uint32_t wait_event;		/* wait event() caller address */
5174 	uint32_t l2p_control;	/* L2pControlField */
5175 	uint32_t l2p_duration;	/* L2pDurationField */
5176 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5177 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5178 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5179 				 * (LMPM_PMG_SEL) */
5180 	uint32_t u_timestamp;	/* indicates the date and time of
5181 				 * compilation */
5182 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5183 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5184 
5185 /*
5186  * UMAC error struct - relevant starting from family 8000 chip.
5187  * Note: This structure is read from the device with IO accesses,
5188  * and the reading already does the endian conversion. As it is
5189  * read with u32-sized accesses, any members with a different size
5190  * need to be ordered correctly though!
5191  */
5192 struct iwm_umac_error_event_table {
5193 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5194 	uint32_t error_id;	/* type of error */
5195 	uint32_t blink1;	/* branch link */
5196 	uint32_t blink2;	/* branch link */
5197 	uint32_t ilink1;	/* interrupt link */
5198 	uint32_t ilink2;	/* interrupt link */
5199 	uint32_t data1;		/* error-specific data */
5200 	uint32_t data2;		/* error-specific data */
5201 	uint32_t data3;		/* error-specific data */
5202 	uint32_t umac_major;
5203 	uint32_t umac_minor;
5204 	uint32_t frame_pointer;	/* core register 27*/
5205 	uint32_t stack_pointer;	/* core register 28 */
5206 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5207 	uint32_t nic_isr_pref;	/* ISR status register */
5208 } __packed;
5209 
5210 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5211 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5212 
5213 #ifdef IWM_DEBUG
5214 struct {
5215 	const char *name;
5216 	uint8_t num;
5217 } advanced_lookup[] = {
5218 	{ "NMI_INTERRUPT_WDG", 0x34 },
5219 	{ "SYSASSERT", 0x35 },
5220 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5221 	{ "BAD_COMMAND", 0x38 },
5222 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5223 	{ "FATAL_ERROR", 0x3D },
5224 	{ "NMI_TRM_HW_ERR", 0x46 },
5225 	{ "NMI_INTERRUPT_TRM", 0x4C },
5226 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5227 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5228 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5229 	{ "NMI_INTERRUPT_HOST", 0x66 },
5230 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5231 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5232 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5233 	{ "ADVANCED_SYSASSERT", 0 },
5234 };
5235 
5236 static const char *
5237 iwm_desc_lookup(uint32_t num)
5238 {
5239 	int i;
5240 
5241 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5242 		if (advanced_lookup[i].num == num)
5243 			return advanced_lookup[i].name;
5244 
5245 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5246 	return advanced_lookup[i].name;
5247 }
5248 
5249 static void
5250 iwm_nic_umac_error(struct iwm_softc *sc)
5251 {
5252 	struct iwm_umac_error_event_table table;
5253 	uint32_t base;
5254 
5255 	base = sc->umac_error_event_table;
5256 
5257 	if (base < 0x800000) {
5258 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5259 		    base);
5260 		return;
5261 	}
5262 
5263 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5264 		device_printf(sc->sc_dev, "reading errlog failed\n");
5265 		return;
5266 	}
5267 
5268 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5269 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5270 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5271 		    sc->sc_flags, table.valid);
5272 	}
5273 
5274 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5275 		iwm_desc_lookup(table.error_id));
5276 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5277 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5278 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5279 	    table.ilink1);
5280 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5281 	    table.ilink2);
5282 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5283 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5284 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5285 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5286 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5287 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5288 	    table.frame_pointer);
5289 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5290 	    table.stack_pointer);
5291 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5292 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5293 	    table.nic_isr_pref);
5294 }
5295 
5296 /*
5297  * Support for dumping the error log seemed like a good idea ...
5298  * but it's mostly hex junk and the only sensible thing is the
5299  * hw/ucode revision (which we know anyway).  Since it's here,
5300  * I'll leave it in, just in case e.g. the Intel guys want to
5301  * help us decipher some "ADVANCED_SYSASSERT" later.
5302  */
5303 static void
5304 iwm_nic_error(struct iwm_softc *sc)
5305 {
5306 	struct iwm_error_event_table table;
5307 	uint32_t base;
5308 
5309 	device_printf(sc->sc_dev, "dumping device error log\n");
5310 	base = sc->error_event_table;
5311 	if (base < 0x800000) {
5312 		device_printf(sc->sc_dev,
5313 		    "Invalid error log pointer 0x%08x\n", base);
5314 		return;
5315 	}
5316 
5317 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5318 		device_printf(sc->sc_dev, "reading errlog failed\n");
5319 		return;
5320 	}
5321 
5322 	if (!table.valid) {
5323 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5324 		return;
5325 	}
5326 
5327 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5328 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5329 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5330 		    sc->sc_flags, table.valid);
5331 	}
5332 
5333 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5334 	    iwm_desc_lookup(table.error_id));
5335 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5336 	    table.trm_hw_status0);
5337 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5338 	    table.trm_hw_status1);
5339 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5340 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5341 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5342 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5343 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5344 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5345 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5346 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5347 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5348 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5349 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5350 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5351 	    table.fw_rev_type);
5352 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5353 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5354 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5355 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5356 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5357 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5358 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5359 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5360 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5361 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5362 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5363 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5364 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5365 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5366 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5367 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5368 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5369 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5370 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5371 
5372 	if (sc->umac_error_event_table)
5373 		iwm_nic_umac_error(sc);
5374 }
5375 #endif
5376 
5377 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
5378 
5379 /*
5380  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5381  * Basic structure from if_iwn
5382  */
5383 static void
5384 iwm_notif_intr(struct iwm_softc *sc)
5385 {
5386 	struct ieee80211com *ic = &sc->sc_ic;
5387 	uint16_t hw;
5388 
5389 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5390 	    BUS_DMASYNC_POSTREAD);
5391 
5392 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5393 
5394 	/*
5395 	 * Process responses
5396 	 */
5397 	while (sc->rxq.cur != hw) {
5398 		struct iwm_rx_ring *ring = &sc->rxq;
5399 		struct iwm_rx_data *data = &ring->data[ring->cur];
5400 		struct iwm_rx_packet *pkt;
5401 		struct iwm_cmd_response *cresp;
5402 		int qid, idx, code;
5403 
5404 		bus_dmamap_sync(ring->data_dmat, data->map,
5405 		    BUS_DMASYNC_POSTREAD);
5406 		pkt = mtod(data->m, struct iwm_rx_packet *);
5407 
5408 		qid = pkt->hdr.qid & ~0x80;
5409 		idx = pkt->hdr.idx;
5410 
5411 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5412 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5413 		    "rx packet qid=%d idx=%d type=%x %d %d\n",
5414 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5415 
5416 		/*
5417 		 * We randomly get these from the firmware, no idea why.
5418 		 * They at least seem harmless, so just ignore them for now.
5419 		 */
5420 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5421 		    || pkt->len_n_flags == htole32(0x55550000))) {
5422 			ADVANCE_RXQ(sc);
5423 			continue;
5424 		}
5425 
5426 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5427 
5428 		switch (code) {
5429 		case IWM_REPLY_RX_PHY_CMD:
5430 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5431 			break;
5432 
5433 		case IWM_REPLY_RX_MPDU_CMD:
5434 			iwm_mvm_rx_rx_mpdu(sc, data->m);
5435 			break;
5436 
5437 		case IWM_TX_CMD:
5438 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
5439 			break;
5440 
5441 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5442 			struct iwm_missed_beacons_notif *resp;
5443 			int missed;
5444 
5445 			/* XXX look at mac_id to determine interface ID */
5446 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5447 
5448 			resp = (void *)pkt->data;
5449 			missed = le32toh(resp->consec_missed_beacons);
5450 
5451 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5452 			    "%s: MISSED_BEACON: mac_id=%d, "
5453 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5454 			    "num_rx=%d\n",
5455 			    __func__,
5456 			    le32toh(resp->mac_id),
5457 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5458 			    le32toh(resp->consec_missed_beacons),
5459 			    le32toh(resp->num_expected_beacons),
5460 			    le32toh(resp->num_recvd_beacons));
5461 
5462 			/* Be paranoid */
5463 			if (vap == NULL)
5464 				break;
5465 
5466 			/* XXX no net80211 locking? */
5467 			if (vap->iv_state == IEEE80211_S_RUN &&
5468 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5469 				if (missed > vap->iv_bmissthreshold) {
5470 					/* XXX bad locking; turn into task */
5471 					IWM_UNLOCK(sc);
5472 					ieee80211_beacon_miss(ic);
5473 					IWM_LOCK(sc);
5474 				}
5475 			}
5476 
5477 			break;
5478 		}
5479 
5480 		case IWM_MFUART_LOAD_NOTIFICATION:
5481 			break;
5482 
5483 		case IWM_MVM_ALIVE:
5484 			break;
5485 
5486 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5487 			break;
5488 
5489 		case IWM_STATISTICS_NOTIFICATION: {
5490 			struct iwm_notif_statistics *stats;
5491 			stats = (void *)pkt->data;
5492 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5493 			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5494 			break;
5495 		}
5496 
5497 		case IWM_NVM_ACCESS_CMD:
5498 		case IWM_MCC_UPDATE_CMD:
5499 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5500 				memcpy(sc->sc_cmd_resp,
5501 				    pkt, sizeof(sc->sc_cmd_resp));
5502 			}
5503 			break;
5504 
5505 		case IWM_MCC_CHUB_UPDATE_CMD: {
5506 			struct iwm_mcc_chub_notif *notif;
5507 			notif = (void *)pkt->data;
5508 
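			/*
			 * The 16-bit mcc packs two ASCII characters, high
			 * byte first: e.g. 0x5553 is "US" ('U' == 0x55,
			 * 'S' == 0x53).
			 */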
5509 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5510 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5511 			sc->sc_fw_mcc[2] = '\0';
5512 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5513 			    "fw source %d sent CC '%s'\n",
5514 			    notif->source_id, sc->sc_fw_mcc);
5515 			break;
5516 		}
5517 
5518 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5519 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5520 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5521 			struct iwm_dts_measurement_notif_v1 *notif;
5522 
5523 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5524 				device_printf(sc->sc_dev,
5525 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5526 				break;
5527 			}
5528 			notif = (void *)pkt->data;
5529 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5530 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5531 			    notif->temp);
5532 			break;
5533 		}
5534 
5535 		case IWM_PHY_CONFIGURATION_CMD:
5536 		case IWM_TX_ANT_CONFIGURATION_CMD:
5537 		case IWM_ADD_STA:
5538 		case IWM_MAC_CONTEXT_CMD:
5539 		case IWM_REPLY_SF_CFG_CMD:
5540 		case IWM_POWER_TABLE_CMD:
5541 		case IWM_PHY_CONTEXT_CMD:
5542 		case IWM_BINDING_CONTEXT_CMD:
5543 		case IWM_TIME_EVENT_CMD:
5544 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5545 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5546 		case IWM_SCAN_ABORT_UMAC:
5547 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5548 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5549 		case IWM_REPLY_BEACON_FILTERING_CMD:
5550 		case IWM_MAC_PM_POWER_TABLE:
5551 		case IWM_TIME_QUOTA_CMD:
5552 		case IWM_REMOVE_STA:
5553 		case IWM_TXPATH_FLUSH:
5554 		case IWM_LQ_CMD:
5555 		case IWM_FW_PAGING_BLOCK_CMD:
5556 		case IWM_BT_CONFIG:
5557 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5558 			cresp = (void *)pkt->data;
5559 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5560 				memcpy(sc->sc_cmd_resp,
5561 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5562 			}
5563 			break;
5564 
5565 		/* ignore */
5566 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5567 			break;
5568 
5569 		case IWM_INIT_COMPLETE_NOTIF:
5570 			break;
5571 
5572 		case IWM_SCAN_OFFLOAD_COMPLETE:
5573 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5574 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5575 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5576 				ieee80211_runtask(ic, &sc->sc_es_task);
5577 			}
5578 			break;
5579 
5580 		case IWM_SCAN_ITERATION_COMPLETE: {
5581 			struct iwm_lmac_scan_complete_notif *notif;
5582 			notif = (void *)pkt->data;
5583 			break;
5584 		}
5585 
5586 		case IWM_SCAN_COMPLETE_UMAC:
5587 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5588 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5589 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5590 				ieee80211_runtask(ic, &sc->sc_es_task);
5591 			}
5592 			break;
5593 
5594 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5595 			struct iwm_umac_scan_iter_complete_notif *notif;
5596 			notif = (void *)pkt->data;
5597 
5598 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5599 			    "complete, status=0x%x, %d channels scanned\n",
5600 			    notif->status, notif->scanned_channels);
5601 			break;
5602 		}
5603 
5604 		case IWM_REPLY_ERROR: {
5605 			struct iwm_error_resp *resp;
5606 			resp = (void *)pkt->data;
5607 
5608 			device_printf(sc->sc_dev,
5609 			    "firmware error 0x%x, cmd 0x%x\n",
5610 			    le32toh(resp->error_type),
5611 			    resp->cmd_id);
5612 			break;
5613 		}
5614 
5615 		case IWM_TIME_EVENT_NOTIFICATION: {
5616 			struct iwm_time_event_notif *notif;
5617 			notif = (void *)pkt->data;
5618 
5619 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5620 			    "TE notif status = 0x%x action = 0x%x\n",
5621 			    notif->status, notif->action);
5622 			break;
5623 		}
5624 
5625 		case IWM_MCAST_FILTER_CMD:
5626 			break;
5627 
5628 		case IWM_SCD_QUEUE_CFG: {
5629 			struct iwm_scd_txq_cfg_rsp *rsp;
5630 			rsp = (void *)pkt->data;
5631 
5632 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5633 			    "queue cfg token=0x%x sta_id=%d "
5634 			    "tid=%d scd_queue=%d\n",
5635 			    rsp->token, rsp->sta_id, rsp->tid,
5636 			    rsp->scd_queue);
5637 			break;
5638 		}
5639 
5640 		default:
5641 			device_printf(sc->sc_dev,
5642 			    "frame %d/%d %x UNHANDLED (this should "
5643 			    "not happen)\n", qid, idx,
5644 			    pkt->len_n_flags);
5645 			break;
5646 		}
5647 
5648 		/*
5649 		 * Why test bit 0x80?  The Linux driver:
5650 		 *
5651 		 * There is one exception:  uCode sets bit 15 when it
5652 		 * originates the response/notification, i.e. when the
5653 		 * response/notification is not a direct response to a
5654 		 * command sent by the driver.  For example, uCode issues
5655 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5656 		 * it is not a direct response to any driver command.
5657 		 *
5658 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5659 		 * uses a slightly different format for pkt->hdr, and "qid"
5660 		 * is actually the upper byte of a two-byte field.
5661 		 */
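		/*
		 * A sketch of the two header layouts (an illustration of the
		 * comment above; the Linux field name is from memory and may
		 * differ):
		 *
		 *   Linux:  __le16 sequence;     response bit == bit 15
		 *   here:   uint8_t idx, qid;    qid is the upper byte, so
		 *                                the same bit is qid & (1 << 7)
		 */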
5662 		if (!(pkt->hdr.qid & (1 << 7))) {
5663 			iwm_cmd_done(sc, pkt);
5664 		}
5665 
5666 		ADVANCE_RXQ(sc);
5667 	}
5668 
5669 	/*
5670 	 * Tell the firmware what we have processed.
5671 	 * The hardware seems to get upset unless we align
5672 	 * the write pointer to a multiple of 8.
5673 	 */
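	/* A worked example: hw == 211 is written back as (211 - 1) & ~7 == 208. */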
5674 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5675 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5676 }
5677 
5678 static void
5679 iwm_intr(void *arg)
5680 {
5681 	struct iwm_softc *sc = arg;
5682 	int handled = 0;
5683 	int r1, r2, rv = 0;
5684 	int isperiodic = 0;
5685 
5686 	IWM_LOCK(sc);
5687 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5688 
5689 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5690 		uint32_t *ict = sc->ict_dma.vaddr;
5691 		int tmp;
5692 
5693 		tmp = le32toh(ict[sc->ict_cur]);
5694 		if (!tmp)
5695 			goto out_ena;
5696 
5697 		/*
5698 		 * ok, there was something.  keep plowing until we have it all.
5699 		 */
5700 		r1 = r2 = 0;
5701 		while (tmp) {
5702 			r1 |= tmp;
5703 			ict[sc->ict_cur] = 0;
5704 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5705 			tmp = le32toh(ict[sc->ict_cur]);
5706 		}
5707 
5708 		/* this is where the fun begins: an all-ones read is bogus, so ignore it */
5709 		if (r1 == 0xffffffff)
5710 			r1 = 0;
5711 
5712 		/* h/w bug (per iwlwifi): coalescing can clear the Rx bit; bits 18-19 survive, so rebuild it */
5713 		if (r1 & 0xc0000)
5714 			r1 |= 0x8000;
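		/*
		 * Expand the compressed ICT value back into IWM_CSR_INT
		 * layout: low byte -> bits 0-7, high byte -> bits 24-31
		 * (my reading of the expression below).
		 */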
5715 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5716 	} else {
5717 		r1 = IWM_READ(sc, IWM_CSR_INT);
5718 		/* "hardware gone" (where, fishing?) */
5719 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5720 			goto out;
5721 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5722 	}
5723 	if (r1 == 0 && r2 == 0) {
5724 		goto out_ena;
5725 	}
5726 
5727 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5728 
5729 	/* Safely ignore these bits for debug checks below */
5730 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5731 
5732 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5733 		int i;
5734 		struct ieee80211com *ic = &sc->sc_ic;
5735 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5736 
5737 #ifdef IWM_DEBUG
5738 		iwm_nic_error(sc);
5739 #endif
5740 		/* Dump driver status (TX and RX rings) while we're here. */
5741 		device_printf(sc->sc_dev, "driver status:\n");
5742 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5743 			struct iwm_tx_ring *ring = &sc->txq[i];
5744 			device_printf(sc->sc_dev,
5745 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5746 			    "queued=%-3d\n",
5747 			    i, ring->qid, ring->cur, ring->queued);
5748 		}
5749 		device_printf(sc->sc_dev,
5750 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5751 		device_printf(sc->sc_dev,
5752 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5753 
5754 		/* Don't stop the device; just do a VAP restart */
5755 		IWM_UNLOCK(sc);
5756 
5757 		if (vap == NULL) {
5758 			printf("%s: null vap\n", __func__);
5759 			return;
5760 		}
5761 
5762 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5763 		    "restarting\n", __func__, vap->iv_state);
5764 
5765 		ieee80211_restart_all(ic);
5766 		return;
5767 	}
5768 
5769 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5770 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5771 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5772 		iwm_stop(sc);
5773 		rv = 1;
5774 		goto out;
5775 	}
5776 
5777 	/* firmware chunk loaded */
5778 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5779 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5780 		handled |= IWM_CSR_INT_BIT_FH_TX;
5781 		sc->sc_fw_chunk_done = 1;
5782 		wakeup(&sc->sc_fw);
5783 	}
5784 
5785 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5786 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5787 		if (iwm_check_rfkill(sc)) {
5788 			device_printf(sc->sc_dev,
5789 			    "%s: rfkill switch, disabling interface\n",
5790 			    __func__);
5791 			iwm_stop(sc);
5792 		}
5793 	}
5794 
5795 	/*
5796 	 * The Linux driver uses periodic interrupts to avoid races.
5797 	 * We cargo-cult like it's going out of fashion.
5798 	 */
5799 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5800 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5801 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5802 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5803 			IWM_WRITE_1(sc,
5804 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5805 		isperiodic = 1;
5806 	}
5807 
5808 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5809 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5810 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5811 
5812 		iwm_notif_intr(sc);
5813 
5814 		/* enable periodic interrupt, see above */
5815 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5816 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5817 			    IWM_CSR_INT_PERIODIC_ENA);
5818 	}
5819 
5820 	if (__predict_false(r1 & ~handled))
5821 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5822 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5823 	rv = 1;
5824 
5825  out_ena:
5826 	iwm_restore_interrupts(sc);
5827  out:
5828 	IWM_UNLOCK(sc);
5829 	return;
5830 }
5831 
5832 /*
5833  * Autoconf glue-sniffing
5834  */
5835 #define	PCI_VENDOR_INTEL		0x8086
5836 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5837 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5838 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5839 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5840 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5841 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5842 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5843 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5844 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5845 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5846 
5847 static const struct iwm_devices {
5848 	uint16_t		device;
5849 	const struct iwm_cfg	*cfg;
5850 } iwm_devices[] = {
5851 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5852 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5853 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5854 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5855 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5856 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5857 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5858 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5859 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5860 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5861 };
5862 
5863 static int
5864 iwm_probe(device_t dev)
5865 {
5866 	int i;
5867 
5868 	for (i = 0; i < nitems(iwm_devices); i++) {
5869 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5870 		    pci_get_device(dev) == iwm_devices[i].device) {
5871 			device_set_desc(dev, iwm_devices[i].cfg->name);
5872 			return (BUS_PROBE_DEFAULT);
5873 		}
5874 	}
5875 
5876 	return (ENXIO);
5877 }
5878 
5879 static int
5880 iwm_dev_check(device_t dev)
5881 {
5882 	struct iwm_softc *sc;
5883 	uint16_t devid;
5884 	int i;
5885 
5886 	sc = device_get_softc(dev);
5887 
5888 	devid = pci_get_device(dev);
5889 	for (i = 0; i < nitems(iwm_devices); i++) {
5890 		if (iwm_devices[i].device == devid) {
5891 			sc->cfg = iwm_devices[i].cfg;
5892 			return (0);
5893 		}
5894 	}
5895 	device_printf(dev, "unknown adapter type\n");
5896 	return ENXIO;
5897 }
5898 
5899 /* PCI registers */
5900 #define PCI_CFG_RETRY_TIMEOUT	0x041
5901 
5902 static int
5903 iwm_pci_attach(device_t dev)
5904 {
5905 	struct iwm_softc *sc;
5906 	int count, error, rid;
5907 	uint16_t reg;
5908 
5909 	sc = device_get_softc(dev);
5910 
5911 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5912 	 * PCI Tx retries from interfering with C3 CPU state */
5913 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5914 
5915 	/* Enable bus-mastering and hardware bug workaround. */
5916 	pci_enable_busmaster(dev);
5917 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5918 	/* if !MSI */
5919 	if (reg & PCIM_STATUS_INTxSTATE) {
5920 		reg &= ~PCIM_STATUS_INTxSTATE;
5921 	}
5922 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5923 
5924 	rid = PCIR_BAR(0);
5925 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5926 	    RF_ACTIVE);
5927 	if (sc->sc_mem == NULL) {
5928 		device_printf(sc->sc_dev, "can't map mem space\n");
5929 		return (ENXIO);
5930 	}
5931 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5932 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5933 
5934 	/* Install interrupt handler. */
5935 	count = 1;
5936 	rid = 0;
5937 	if (pci_alloc_msi(dev, &count) == 0)
5938 		rid = 1;
5939 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5940 	    (rid != 0 ? 0 : RF_SHAREABLE));
5941 	if (sc->sc_irq == NULL) {
5942 		device_printf(dev, "can't map interrupt\n");
5943 		return (ENXIO);
5944 	}
5945 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5946 	    NULL, iwm_intr, sc, &sc->sc_ih);
5947 	if (error != 0 || sc->sc_ih == NULL) {
5948 		device_printf(dev, "can't establish interrupt\n");
5949 		return (ENXIO);
5950 	}
5951 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5952 
5953 	return (0);
5954 }
5955 
5956 static void
5957 iwm_pci_detach(device_t dev)
5958 {
5959 	struct iwm_softc *sc = device_get_softc(dev);
5960 
5961 	if (sc->sc_irq != NULL) {
5962 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5963 		bus_release_resource(dev, SYS_RES_IRQ,
5964 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5965 		pci_release_msi(dev);
5966 	}
5967 	if (sc->sc_mem != NULL)
5968 		bus_release_resource(dev, SYS_RES_MEMORY,
5969 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5970 }
5971 
5972 
5973 
5974 static int
5975 iwm_attach(device_t dev)
5976 {
5977 	struct iwm_softc *sc = device_get_softc(dev);
5978 	struct ieee80211com *ic = &sc->sc_ic;
5979 	int error;
5980 	int txq_i, i;
5981 
5982 	sc->sc_dev = dev;
5983 	sc->sc_attached = 1;
5984 	IWM_LOCK_INIT(sc);
5985 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5986 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5987 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5988 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5989 
5990 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5991 	if (sc->sc_notif_wait == NULL) {
5992 		device_printf(dev, "failed to init notification wait struct\n");
5993 		goto fail;
5994 	}
5995 
5996 	/* Init phy db */
5997 	sc->sc_phy_db = iwm_phy_db_init(sc);
5998 	if (!sc->sc_phy_db) {
5999 		device_printf(dev, "Cannot init phy_db\n");
6000 		goto fail;
6001 	}
6002 
6003 	/* Assume EBS is successful until the firmware states otherwise. */
6004 	sc->last_ebs_successful = TRUE;
6005 
6006 	/* PCI attach */
6007 	error = iwm_pci_attach(dev);
6008 	if (error != 0)
6009 		goto fail;
6010 
6011 	sc->sc_wantresp = -1;
6012 
6013 	/* Check device type */
6014 	error = iwm_dev_check(dev);
6015 	if (error != 0)
6016 		goto fail;
6017 
6018 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6019 	/*
6020 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6021 	 * changed: the revision step now also includes bits 0-1 (there is no
6022 	 * more "dash" value). To keep hw_rev backwards compatible, we store it
6023 	 * in the old format.
6024 	 */
6025 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6026 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6027 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
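	/*
	 * A sketch of the remap above, assuming IWM_CSR_HW_REV_STEP
	 * extracts bits 3:2 (as in iwlwifi): "hw_rev << 2" moves the
	 * new-format step bits 1:0 into that window, and the outer "<< 2"
	 * stores the extracted step in the old "dash" position (bits 3:2).
	 */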
6028 
6029 	if (iwm_prepare_card_hw(sc) != 0) {
6030 		device_printf(dev, "could not initialize hardware\n");
6031 		goto fail;
6032 	}
6033 
6034 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6035 		int ret;
6036 		uint32_t hw_step;
6037 
6038 		/*
6039 		 * In order to recognize a C step, the driver reads the
6040 		 * chip version id located at the AUX bus MISC address.
6041 		 */
6042 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6043 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6044 		DELAY(2);
6045 
6046 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6047 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6048 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6049 				   25000);
6050 		if (!ret) {
6051 			device_printf(sc->sc_dev,
6052 			    "Failed to wake up the nic\n");
6053 			goto fail;
6054 		}
6055 
6056 		if (iwm_nic_lock(sc)) {
6057 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6058 			hw_step |= IWM_ENABLE_WFPM;
6059 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6060 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6061 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6062 			if (hw_step == 0x3)
6063 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6064 						(IWM_SILICON_C_STEP << 2);
6065 			iwm_nic_unlock(sc);
6066 		} else {
6067 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6068 			goto fail;
6069 		}
6070 	}
6071 
6072 	/* Special-case the 7265D; it has the same PCI IDs as the 7265. */
6073 	if (sc->cfg == &iwm7265_cfg &&
6074 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6075 		sc->cfg = &iwm7265d_cfg;
6076 	}
6077 
6078 	/* Allocate DMA memory for firmware transfers. */
6079 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6080 		device_printf(dev, "could not allocate memory for firmware\n");
6081 		goto fail;
6082 	}
6083 
6084 	/* Allocate "Keep Warm" page. */
6085 	if ((error = iwm_alloc_kw(sc)) != 0) {
6086 		device_printf(dev, "could not allocate keep warm page\n");
6087 		goto fail;
6088 	}
6089 
6090 	/* We use ICT interrupts */
6091 	if ((error = iwm_alloc_ict(sc)) != 0) {
6092 		device_printf(dev, "could not allocate ICT table\n");
6093 		goto fail;
6094 	}
6095 
6096 	/* Allocate TX scheduler "rings". */
6097 	if ((error = iwm_alloc_sched(sc)) != 0) {
6098 		device_printf(dev, "could not allocate TX scheduler rings\n");
6099 		goto fail;
6100 	}
6101 
6102 	/* Allocate TX rings */
6103 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6104 		if ((error = iwm_alloc_tx_ring(sc,
6105 		    &sc->txq[txq_i], txq_i)) != 0) {
6106 			device_printf(dev,
6107 			    "could not allocate TX ring %d\n",
6108 			    txq_i);
6109 			goto fail;
6110 		}
6111 	}
6112 
6113 	/* Allocate RX ring. */
6114 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6115 		device_printf(dev, "could not allocate RX ring\n");
6116 		goto fail;
6117 	}
6118 
6119 	/* Clear pending interrupts. */
6120 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6121 
6122 	ic->ic_softc = sc;
6123 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6124 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6125 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6126 
6127 	/* Set device capabilities. */
6128 	ic->ic_caps =
6129 	    IEEE80211_C_STA |
6130 	    IEEE80211_C_WPA |		/* WPA/RSN */
6131 	    IEEE80211_C_WME |
6132 	    IEEE80211_C_PMGT |
6133 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6134 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6135 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6136 	    ;
6137 	/* Advertise full-offload scanning */
6138 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6139 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6140 		sc->sc_phyctxt[i].id = i;
6141 		sc->sc_phyctxt[i].color = 0;
6142 		sc->sc_phyctxt[i].ref = 0;
6143 		sc->sc_phyctxt[i].channel = NULL;
6144 	}
6145 
6146 	/* Default noise floor */
6147 	sc->sc_noise = -96;
6148 
6149 	/* Max RSSI */
6150 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6151 
6152 	sc->sc_preinit_hook.ich_func = iwm_preinit;
6153 	sc->sc_preinit_hook.ich_arg = sc;
6154 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6155 		device_printf(dev, "config_intrhook_establish failed\n");
6156 		goto fail;
6157 	}
6158 
6159 #ifdef IWM_DEBUG
6160 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6161 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6162 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6163 #endif
6164 
6165 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6166 	    "<-%s\n", __func__);
6167 
6168 	return 0;
6169 
6170 	/* Free allocated memory if something failed during attachment. */
6171 fail:
6172 	iwm_detach_local(sc, 0);
6173 
6174 	return ENXIO;
6175 }
6176 
6177 static int
6178 iwm_is_valid_ether_addr(uint8_t *addr)
6179 {
6180 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6181 
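	/* addr[0] & 1 is the multicast (I/G) bit, e.g. set in 01:00:5e:xx:xx:xx. */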
6182 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6183 		return (FALSE);
6184 
6185 	return (TRUE);
6186 }
6187 
6188 static int
6189 iwm_update_edca(struct ieee80211com *ic)
6190 {
6191 	struct iwm_softc *sc = ic->ic_softc;
6192 
6193 	device_printf(sc->sc_dev, "%s: called\n", __func__);
6194 	return (0);
6195 }
6196 
6197 static void
6198 iwm_preinit(void *arg)
6199 {
6200 	struct iwm_softc *sc = arg;
6201 	device_t dev = sc->sc_dev;
6202 	struct ieee80211com *ic = &sc->sc_ic;
6203 	int error;
6204 
6205 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6206 	    "->%s\n", __func__);
6207 
6208 	IWM_LOCK(sc);
6209 	if ((error = iwm_start_hw(sc)) != 0) {
6210 		device_printf(dev, "could not initialize hardware\n");
6211 		IWM_UNLOCK(sc);
6212 		goto fail;
6213 	}
6214 
6215 	error = iwm_run_init_mvm_ucode(sc, 1);
6216 	iwm_stop_device(sc);
6217 	if (error) {
6218 		IWM_UNLOCK(sc);
6219 		goto fail;
6220 	}
6221 	device_printf(dev,
6222 	    "hw rev 0x%x, fw ver %s, address %s\n",
6223 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6224 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6225 
6226 	/* not all hardware can do 5GHz band */
6227 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6228 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6229 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6230 	IWM_UNLOCK(sc);
6231 
6232 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6233 	    ic->ic_channels);
6234 
6235 	/*
6236 	 * At this point we've committed - if we fail to do setup,
6237 	 * we now also have to tear down the net80211 state.
6238 	 */
6239 	ieee80211_ifattach(ic);
6240 	ic->ic_vap_create = iwm_vap_create;
6241 	ic->ic_vap_delete = iwm_vap_delete;
6242 	ic->ic_raw_xmit = iwm_raw_xmit;
6243 	ic->ic_node_alloc = iwm_node_alloc;
6244 	ic->ic_scan_start = iwm_scan_start;
6245 	ic->ic_scan_end = iwm_scan_end;
6246 	ic->ic_update_mcast = iwm_update_mcast;
6247 	ic->ic_getradiocaps = iwm_init_channel_map;
6248 	ic->ic_set_channel = iwm_set_channel;
6249 	ic->ic_scan_curchan = iwm_scan_curchan;
6250 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6251 	ic->ic_wme.wme_update = iwm_update_edca;
6252 	ic->ic_parent = iwm_parent;
6253 	ic->ic_transmit = iwm_transmit;
6254 	iwm_radiotap_attach(sc);
6255 	if (bootverbose)
6256 		ieee80211_announce(ic);
6257 
6258 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6259 	    "<-%s\n", __func__);
6260 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6261 
6262 	return;
6263 fail:
6264 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6265 	iwm_detach_local(sc, 0);
6266 }
6267 
6268 /*
6269  * Attach the interface to 802.11 radiotap.
6270  */
6271 static void
6272 iwm_radiotap_attach(struct iwm_softc *sc)
6273 {
6274 	struct ieee80211com *ic = &sc->sc_ic;
6275 
6276 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6277 	    "->%s begin\n", __func__);
6278 	ieee80211_radiotap_attach(ic,
6279 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6280 	    IWM_TX_RADIOTAP_PRESENT,
6281 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6282 	    IWM_RX_RADIOTAP_PRESENT);
6283 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6284 	    "->%s end\n", __func__);
6285 }
6286 
6287 static struct ieee80211vap *
6288 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6289     enum ieee80211_opmode opmode, int flags,
6290     const uint8_t bssid[IEEE80211_ADDR_LEN],
6291     const uint8_t mac[IEEE80211_ADDR_LEN])
6292 {
6293 	struct iwm_vap *ivp;
6294 	struct ieee80211vap *vap;
6295 
6296 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6297 		return NULL;
6298 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6299 	vap = &ivp->iv_vap;
6300 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6301 	vap->iv_bmissthreshold = 10;            /* override default */
6302 	/* Override with driver methods. */
6303 	ivp->iv_newstate = vap->iv_newstate;
6304 	vap->iv_newstate = iwm_newstate;
6305 
6306 	ieee80211_ratectl_init(vap);
6307 	/* Complete setup. */
6308 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6309 	    mac);
6310 	ic->ic_opmode = opmode;
6311 
6312 	return vap;
6313 }
6314 
6315 static void
6316 iwm_vap_delete(struct ieee80211vap *vap)
6317 {
6318 	struct iwm_vap *ivp = IWM_VAP(vap);
6319 
6320 	ieee80211_ratectl_deinit(vap);
6321 	ieee80211_vap_detach(vap);
6322 	free(ivp, M_80211_VAP);
6323 }
6324 
6325 static void
6326 iwm_scan_start(struct ieee80211com *ic)
6327 {
6328 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6329 	struct iwm_softc *sc = ic->ic_softc;
6330 	int error;
6331 
6332 	IWM_LOCK(sc);
6333 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6334 		/* This should not be possible */
6335 		device_printf(sc->sc_dev,
6336 		    "%s: Previous scan not completed yet\n", __func__);
6337 	}
6338 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6339 		error = iwm_mvm_umac_scan(sc);
6340 	else
6341 		error = iwm_mvm_lmac_scan(sc);
6342 	if (error != 0) {
6343 		device_printf(sc->sc_dev, "could not initiate scan\n");
6344 		IWM_UNLOCK(sc);
6345 		ieee80211_cancel_scan(vap);
6346 	} else {
6347 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6348 		iwm_led_blink_start(sc);
6349 		IWM_UNLOCK(sc);
6350 	}
6351 }
6352 
6353 static void
6354 iwm_scan_end(struct ieee80211com *ic)
6355 {
6356 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6357 	struct iwm_softc *sc = ic->ic_softc;
6358 
6359 	IWM_LOCK(sc);
6360 	iwm_led_blink_stop(sc);
6361 	if (vap->iv_state == IEEE80211_S_RUN)
6362 		iwm_mvm_led_enable(sc);
6363 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6364 		/*
6365 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6366 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6367 		 * taskqueue.
6368 		 */
6369 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6370 		iwm_mvm_scan_stop_wait(sc);
6371 	}
6372 	IWM_UNLOCK(sc);
6373 
6374 	/*
6375 	 * Make sure we don't race if sc_es_task is still enqueued here.
6376 	 * This is to make sure that it won't call ieee80211_scan_done
6377 	 * when we have already started the next scan.
6378 	 */
6379 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6380 }
6381 
6382 static void
6383 iwm_update_mcast(struct ieee80211com *ic)
6384 {
6385 }
6386 
6387 static void
6388 iwm_set_channel(struct ieee80211com *ic)
6389 {
6390 }
6391 
6392 static void
6393 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6394 {
6395 }
6396 
6397 static void
6398 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6399 {
6400 	return;
6401 }
6402 
6403 void
6404 iwm_init_task(void *arg1)
6405 {
6406 	struct iwm_softc *sc = arg1;
6407 
6408 	IWM_LOCK(sc);
6409 	while (sc->sc_flags & IWM_FLAG_BUSY)
6410 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6411 	sc->sc_flags |= IWM_FLAG_BUSY;
6412 	iwm_stop(sc);
6413 	if (sc->sc_ic.ic_nrunning > 0)
6414 		iwm_init(sc);
6415 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6416 	wakeup(&sc->sc_flags);
6417 	IWM_UNLOCK(sc);
6418 }
6419 
6420 static int
6421 iwm_resume(device_t dev)
6422 {
6423 	struct iwm_softc *sc = device_get_softc(dev);
6424 	int do_reinit = 0;
6425 
6426 	/*
6427 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6428 	 * PCI Tx retries from interfering with C3 CPU state.
6429 	 */
6430 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6431 	iwm_init_task(device_get_softc(dev));
6432 
6433 	IWM_LOCK(sc);
6434 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6435 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6436 		do_reinit = 1;
6437 	}
6438 	IWM_UNLOCK(sc);
6439 
6440 	if (do_reinit)
6441 		ieee80211_resume_all(&sc->sc_ic);
6442 
6443 	return 0;
6444 }
6445 
6446 static int
6447 iwm_suspend(device_t dev)
6448 {
6449 	int do_stop = 0;
6450 	struct iwm_softc *sc = device_get_softc(dev);
6451 
6452 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6453 
6454 	ieee80211_suspend_all(&sc->sc_ic);
6455 
6456 	if (do_stop) {
6457 		IWM_LOCK(sc);
6458 		iwm_stop(sc);
6459 		sc->sc_flags |= IWM_FLAG_SCANNING;
6460 		IWM_UNLOCK(sc);
6461 	}
6462 
6463 	return (0);
6464 }
6465 
6466 static int
6467 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6468 {
6469 	struct iwm_fw_info *fw = &sc->sc_fw;
6470 	device_t dev = sc->sc_dev;
6471 	int i;
6472 
6473 	if (!sc->sc_attached)
6474 		return 0;
6475 	sc->sc_attached = 0;
6476 
6477 	if (do_net80211)
6478 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6479 
6480 	callout_drain(&sc->sc_led_blink_to);
6481 	callout_drain(&sc->sc_watchdog_to);
6482 	iwm_stop_device(sc);
6483 	if (do_net80211) {
6484 		ieee80211_ifdetach(&sc->sc_ic);
6485 	}
6486 
6487 	iwm_phy_db_free(sc->sc_phy_db);
6488 	sc->sc_phy_db = NULL;
6489 
6490 	iwm_free_nvm_data(sc->nvm_data);
6491 
6492 	/* Free descriptor rings */
6493 	iwm_free_rx_ring(sc, &sc->rxq);
6494 	for (i = 0; i < nitems(sc->txq); i++)
6495 		iwm_free_tx_ring(sc, &sc->txq[i]);
6496 
6497 	/* Free firmware */
6498 	if (fw->fw_fp != NULL)
6499 		iwm_fw_info_free(fw);
6500 
6501 	/* Free scheduler */
6502 	iwm_dma_contig_free(&sc->sched_dma);
6503 	iwm_dma_contig_free(&sc->ict_dma);
6504 	iwm_dma_contig_free(&sc->kw_dma);
6505 	iwm_dma_contig_free(&sc->fw_dma);
6506 
6507 	iwm_free_fw_paging(sc);
6508 
6509 	/* Finished with the hardware - detach things */
6510 	iwm_pci_detach(dev);
6511 
6512 	if (sc->sc_notif_wait != NULL) {
6513 		iwm_notification_wait_free(sc->sc_notif_wait);
6514 		sc->sc_notif_wait = NULL;
6515 	}
6516 
6517 	mbufq_drain(&sc->sc_snd);
6518 	IWM_LOCK_DESTROY(sc);
6519 
6520 	return (0);
6521 }
6522 
6523 static int
6524 iwm_detach(device_t dev)
6525 {
6526 	struct iwm_softc *sc = device_get_softc(dev);
6527 
6528 	return (iwm_detach_local(sc, 1));
6529 }
6530 
6531 static device_method_t iwm_pci_methods[] = {
6532 	/* Device interface */
6533 	DEVMETHOD(device_probe,		iwm_probe),
6534 	DEVMETHOD(device_attach,	iwm_attach),
6535 	DEVMETHOD(device_detach,	iwm_detach),
6536 	DEVMETHOD(device_suspend,	iwm_suspend),
6537 	DEVMETHOD(device_resume,	iwm_resume),
6538 
6539 	DEVMETHOD_END
6540 };
6541 
6542 static driver_t iwm_pci_driver = {
6543 	"iwm",
6544 	iwm_pci_methods,
6545 	sizeof (struct iwm_softc)
6546 };
6547 
6548 static devclass_t iwm_devclass;
6549 
6550 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6551 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6552 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6553 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6554