xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 1c6dd33d26eb02c6145383a49150965eeca61120)
1 /*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * The driver version we are currently based on is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 #include "opt_wlan.h"
107 #include "opt_iwm.h"
108 
109 #include <sys/param.h>
110 #include <sys/bus.h>
111 #include <sys/conf.h>
112 #include <sys/endian.h>
113 #include <sys/firmware.h>
114 #include <sys/kernel.h>
115 #include <sys/malloc.h>
116 #include <sys/mbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/module.h>
119 #include <sys/proc.h>
120 #include <sys/rman.h>
121 #include <sys/socket.h>
122 #include <sys/sockio.h>
123 #include <sys/sysctl.h>
124 #include <sys/linker.h>
125 
126 #include <machine/bus.h>
127 #include <machine/endian.h>
128 #include <machine/resource.h>
129 
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcireg.h>
132 
133 #include <net/bpf.h>
134 
135 #include <net/if.h>
136 #include <net/if_var.h>
137 #include <net/if_arp.h>
138 #include <net/if_dl.h>
139 #include <net/if_media.h>
140 #include <net/if_types.h>
141 
142 #include <netinet/in.h>
143 #include <netinet/in_systm.h>
144 #include <netinet/if_ether.h>
145 #include <netinet/ip.h>
146 
147 #include <net80211/ieee80211_var.h>
148 #include <net80211/ieee80211_regdomain.h>
149 #include <net80211/ieee80211_ratectl.h>
150 #include <net80211/ieee80211_radiotap.h>
151 
152 #include <dev/iwm/if_iwmreg.h>
153 #include <dev/iwm/if_iwmvar.h>
154 #include <dev/iwm/if_iwm_config.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165 #include <dev/iwm/if_iwm_sf.h>
166 #include <dev/iwm/if_iwm_sta.h>
167 
168 #include <dev/iwm/if_iwm_pcie_trans.h>
169 #include <dev/iwm/if_iwm_led.h>
170 #include <dev/iwm/if_iwm_fw.h>
171 
172 /* From DragonflyBSD */
173 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
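/*
 * Hypothetical usage sketch (not part of the original source): mtodoff()
 * is mtod() plus a byte offset, so e.g.
 *
 *	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, off);
 *
 * yields a pointer of the given type "off" bytes into the mbuf's data.
 */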
174 
175 const uint8_t iwm_nvm_channels[] = {
176 	/* 2.4 GHz */
177 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
178 	/* 5 GHz */
179 	36, 40, 44, 48, 52, 56, 60, 64,
180 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
181 	149, 153, 157, 161, 165
182 };
183 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
184     "IWM_NUM_CHANNELS is too small");
185 
186 const uint8_t iwm_nvm_channels_8000[] = {
187 	/* 2.4 GHz */
188 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
189 	/* 5 GHz */
190 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
191 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
192 	149, 153, 157, 161, 165, 169, 173, 177, 181
193 };
194 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
195     "IWM_NUM_CHANNELS_8000 is too small");
196 
197 #define IWM_NUM_2GHZ_CHANNELS	14
198 #define IWM_N_HW_ADDR_MASK	0xF
199 
200 /*
201  * XXX For now, there's simply a fixed set of rate table entries
202  * that are populated.
203  */
204 const struct iwm_rate {
205 	uint8_t rate;
206 	uint8_t plcp;
207 } iwm_rates[] = {
208 	{   2,	IWM_RATE_1M_PLCP  },
209 	{   4,	IWM_RATE_2M_PLCP  },
210 	{  11,	IWM_RATE_5M_PLCP  },
211 	{  22,	IWM_RATE_11M_PLCP },
212 	{  12,	IWM_RATE_6M_PLCP  },
213 	{  18,	IWM_RATE_9M_PLCP  },
214 	{  24,	IWM_RATE_12M_PLCP },
215 	{  36,	IWM_RATE_18M_PLCP },
216 	{  48,	IWM_RATE_24M_PLCP },
217 	{  72,	IWM_RATE_36M_PLCP },
218 	{  96,	IWM_RATE_48M_PLCP },
219 	{ 108,	IWM_RATE_54M_PLCP },
220 };
221 #define IWM_RIDX_CCK	0
222 #define IWM_RIDX_OFDM	4
223 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
224 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
225 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
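/*
 * Note: the "rate" field above is in net80211's 500 kb/s units, so e.g.
 * { 12, IWM_RATE_6M_PLCP } is 6 Mb/s OFDM and { 2, IWM_RATE_1M_PLCP } is
 * 1 Mb/s CCK; the first IWM_RIDX_OFDM (4) entries are the CCK rates.
 */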
226 
227 struct iwm_nvm_section {
228 	uint16_t length;
229 	uint8_t *data;
230 };
231 
232 #define IWM_UCODE_ALIVE_TIMEOUT	hz
233 #define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
234 
235 struct iwm_alive_data {
236 	int valid;
237 	uint32_t scd_base_addr;
238 };
239 
240 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
241 static int	iwm_firmware_store_section(struct iwm_softc *,
242                                            enum iwm_ucode_type,
243                                            const uint8_t *, size_t);
244 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
245 static void	iwm_fw_info_free(struct iwm_fw_info *);
246 static int	iwm_read_firmware(struct iwm_softc *);
247 static int	iwm_alloc_fwmem(struct iwm_softc *);
248 static int	iwm_alloc_sched(struct iwm_softc *);
249 static int	iwm_alloc_kw(struct iwm_softc *);
250 static int	iwm_alloc_ict(struct iwm_softc *);
251 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
255                                   int);
256 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258 static void	iwm_enable_interrupts(struct iwm_softc *);
259 static void	iwm_restore_interrupts(struct iwm_softc *);
260 static void	iwm_disable_interrupts(struct iwm_softc *);
261 static void	iwm_ict_reset(struct iwm_softc *);
262 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
263 static void	iwm_stop_device(struct iwm_softc *);
264 static void	iwm_nic_config(struct iwm_softc *);
265 static int	iwm_nic_rx_init(struct iwm_softc *);
266 static int	iwm_nic_tx_init(struct iwm_softc *);
267 static int	iwm_nic_init(struct iwm_softc *);
268 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                    uint16_t, uint8_t *, uint16_t *);
271 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272 				     uint16_t *, uint32_t);
273 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
274 static void	iwm_add_channel_band(struct iwm_softc *,
275 		    struct ieee80211_channel[], int, int *, int, size_t,
276 		    const uint8_t[]);
277 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
278 		    struct ieee80211_channel[]);
279 static struct iwm_nvm_data *
280 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281 			   const uint16_t *, const uint16_t *,
282 			   const uint16_t *, const uint16_t *,
283 			   const uint16_t *);
284 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
285 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
286 					       struct iwm_nvm_data *,
287 					       const uint16_t *,
288 					       const uint16_t *);
289 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290 			    const uint16_t *);
291 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293 				  const uint16_t *);
294 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
295 				   const uint16_t *);
296 static void	iwm_set_radio_cfg(const struct iwm_softc *,
297 				  struct iwm_nvm_data *, uint32_t);
298 static struct iwm_nvm_data *
299 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300 static int	iwm_nvm_init(struct iwm_softc *);
301 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302 				      const struct iwm_fw_desc *);
303 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304 					     bus_addr_t, uint32_t);
305 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306 						const struct iwm_fw_img *,
307 						int, int *);
308 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
309 					   const struct iwm_fw_img *,
310 					   int, int *);
311 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312 					       const struct iwm_fw_img *);
313 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
314 					  const struct iwm_fw_img *);
315 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
316 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
318 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
319                                               enum iwm_ucode_type);
320 static int	iwm_run_init_ucode(struct iwm_softc *, int);
321 static int	iwm_config_ltr(struct iwm_softc *sc);
322 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
323 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
324                                       struct iwm_rx_packet *);
325 static int	iwm_get_noise(struct iwm_softc *,
326 		    const struct iwm_statistics_rx_non_phy *);
327 static void	iwm_handle_rx_statistics(struct iwm_softc *,
328 		    struct iwm_rx_packet *);
329 static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
330 		    uint32_t, bool);
331 static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
332                                          struct iwm_rx_packet *,
333 				         struct iwm_node *);
334 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
335 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
336 #if 0
337 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
338                                  uint16_t);
339 #endif
340 static const struct iwm_rate *
341 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
342 			struct mbuf *, struct iwm_tx_cmd *);
343 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
344                        struct ieee80211_node *, int);
345 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
346 			     const struct ieee80211_bpf_params *);
347 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
348 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
349 static struct ieee80211_node *
350 		iwm_node_alloc(struct ieee80211vap *,
351 		               const uint8_t[IEEE80211_ADDR_LEN]);
352 static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
353 static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
354 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
355 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
356 static void	iwm_endscan_cb(void *, int);
357 static int	iwm_send_bt_init_conf(struct iwm_softc *);
358 static boolean_t iwm_is_lar_supported(struct iwm_softc *);
359 static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
360 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
361 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
362 static int	iwm_init_hw(struct iwm_softc *);
363 static void	iwm_init(struct iwm_softc *);
364 static void	iwm_start(struct iwm_softc *);
365 static void	iwm_stop(struct iwm_softc *);
366 static void	iwm_watchdog(void *);
367 static void	iwm_parent(struct ieee80211com *);
368 #ifdef IWM_DEBUG
369 static const char *
370 		iwm_desc_lookup(uint32_t);
371 static void	iwm_nic_error(struct iwm_softc *);
372 static void	iwm_nic_umac_error(struct iwm_softc *);
373 #endif
374 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
375 static void	iwm_notif_intr(struct iwm_softc *);
376 static void	iwm_intr(void *);
377 static int	iwm_attach(device_t);
378 static int	iwm_is_valid_ether_addr(uint8_t *);
379 static void	iwm_preinit(void *);
380 static int	iwm_detach_local(struct iwm_softc *sc, int);
381 static void	iwm_init_task(void *);
382 static void	iwm_radiotap_attach(struct iwm_softc *);
383 static struct ieee80211vap *
384 		iwm_vap_create(struct ieee80211com *,
385 		               const char [IFNAMSIZ], int,
386 		               enum ieee80211_opmode, int,
387 		               const uint8_t [IEEE80211_ADDR_LEN],
388 		               const uint8_t [IEEE80211_ADDR_LEN]);
389 static void	iwm_vap_delete(struct ieee80211vap *);
390 static void	iwm_xmit_queue_drain(struct iwm_softc *);
391 static void	iwm_scan_start(struct ieee80211com *);
392 static void	iwm_scan_end(struct ieee80211com *);
393 static void	iwm_update_mcast(struct ieee80211com *);
394 static void	iwm_set_channel(struct ieee80211com *);
395 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
396 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
397 static int	iwm_detach(device_t);
398 
399 static int	iwm_lar_disable = 0;
400 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
401 
402 /*
403  * Firmware parser.
404  */
405 
406 static int
407 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
408 {
409 	const struct iwm_fw_cscheme_list *l = (const void *)data;
410 
411 	if (dlen < sizeof(*l) ||
412 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
413 		return EINVAL;
414 
415 	/* we don't actually store anything for now, always use s/w crypto */
416 
417 	return 0;
418 }
419 
420 static int
421 iwm_firmware_store_section(struct iwm_softc *sc,
422     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
423 {
424 	struct iwm_fw_img *fws;
425 	struct iwm_fw_desc *fwone;
426 
427 	if (type >= IWM_UCODE_TYPE_MAX)
428 		return EINVAL;
429 	if (dlen < sizeof(uint32_t))
430 		return EINVAL;
431 
432 	fws = &sc->sc_fw.img[type];
433 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
434 		return EINVAL;
435 
436 	fwone = &fws->sec[fws->fw_count];
437 
438 	/* first 32bit are device load offset */
439 	memcpy(&fwone->offset, data, sizeof(uint32_t));
440 
441 	/* rest is data */
442 	fwone->data = data + sizeof(uint32_t);
443 	fwone->len = dlen - sizeof(uint32_t);
444 
445 	fws->fw_count++;
446 
447 	return 0;
448 }
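/*
 * Sketch of the section blob layout consumed above, as implied by the
 * pointer math (not a separately documented format):
 *
 *	bytes 0..3     le32 load offset in device memory (fwone->offset)
 *	bytes 4..dlen  section image (fwone->data, fwone->len = dlen - 4)
 */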
449 
450 #define IWM_DEFAULT_SCAN_CHANNELS 40
451 
452 /* iwlwifi: iwl-drv.c */
453 struct iwm_tlv_calib_data {
454 	uint32_t ucode_type;
455 	struct iwm_tlv_calib_ctrl calib;
456 } __packed;
457 
458 static int
459 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
460 {
461 	const struct iwm_tlv_calib_data *def_calib = data;
462 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
463 
464 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
465 		device_printf(sc->sc_dev,
466 		    "Wrong ucode_type %u for default "
467 		    "calibration.\n", ucode_type);
468 		return EINVAL;
469 	}
470 
471 	sc->sc_default_calib[ucode_type].flow_trigger =
472 	    def_calib->calib.flow_trigger;
473 	sc->sc_default_calib[ucode_type].event_trigger =
474 	    def_calib->calib.event_trigger;
475 
476 	return 0;
477 }
478 
479 static int
480 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
481 			struct iwm_ucode_capabilities *capa)
482 {
483 	const struct iwm_ucode_api *ucode_api = (const void *)data;
484 	uint32_t api_index = le32toh(ucode_api->api_index);
485 	uint32_t api_flags = le32toh(ucode_api->api_flags);
486 	int i;
487 
488 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
489 		device_printf(sc->sc_dev,
490 		    "api flags index %d larger than supported by driver\n",
491 		    api_index);
492 		/* don't return an error so we can load FW that has more bits */
493 		return 0;
494 	}
495 
496 	for (i = 0; i < 32; i++) {
497 		if (api_flags & (1U << i))
498 			setbit(capa->enabled_api, i + 32 * api_index);
499 	}
500 
501 	return 0;
502 }
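/*
 * The bitmap math above (and in iwm_set_ucode_capabilities() below) maps
 * flag bit i of 32-bit word api_index to absolute bit (32 * api_index + i)
 * in the capability bitmap; e.g. bit 3 of word 1 sets bit 35.
 */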
503 
504 static int
505 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
506 			   struct iwm_ucode_capabilities *capa)
507 {
508 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
509 	uint32_t api_index = le32toh(ucode_capa->api_index);
510 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
511 	int i;
512 
513 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
514 		device_printf(sc->sc_dev,
515 		    "capa flags index %d larger than supported by driver\n",
516 		    api_index);
517 		/* don't return an error so we can load FW that has more bits */
518 		return 0;
519 	}
520 
521 	for (i = 0; i < 32; i++) {
522 		if (api_flags & (1U << i))
523 			setbit(capa->enabled_capa, i + 32 * api_index);
524 	}
525 
526 	return 0;
527 }
528 
529 static void
530 iwm_fw_info_free(struct iwm_fw_info *fw)
531 {
532 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
533 	fw->fw_fp = NULL;
534 	memset(fw->img, 0, sizeof(fw->img));
535 }
536 
537 static int
538 iwm_read_firmware(struct iwm_softc *sc)
539 {
540 	struct iwm_fw_info *fw = &sc->sc_fw;
541 	const struct iwm_tlv_ucode_header *uhdr;
542 	const struct iwm_ucode_tlv *tlv;
543 	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
544 	enum iwm_ucode_tlv_type tlv_type;
545 	const struct firmware *fwp;
546 	const uint8_t *data;
547 	uint32_t tlv_len;
548 	uint32_t usniffer_img;
549 	const uint8_t *tlv_data;
550 	uint32_t paging_mem_size;
551 	int num_of_cpus;
552 	int error = 0;
553 	size_t len;
554 
555 	/*
556 	 * Load firmware into driver memory.
557 	 * fw_fp will be set.
558 	 */
559 	fwp = firmware_get(sc->cfg->fw_name);
560 	if (fwp == NULL) {
561 		device_printf(sc->sc_dev,
562 		    "could not read firmware %s (error %d)\n",
563 		    sc->cfg->fw_name, error);
564 		goto out;
565 	}
566 	fw->fw_fp = fwp;
567 
568 	/* (Re-)Initialize default values. */
569 	capa->flags = 0;
570 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
571 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
572 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
573 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
574 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
575 
576 	/*
577 	 * Parse firmware contents
578 	 */
579 
580 	uhdr = (const void *)fw->fw_fp->data;
581 	if (*(const uint32_t *)fw->fw_fp->data != 0
582 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
583 		device_printf(sc->sc_dev, "invalid firmware %s\n",
584 		    sc->cfg->fw_name);
585 		error = EINVAL;
586 		goto out;
587 	}
588 
589 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
590 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
591 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
592 	    IWM_UCODE_API(le32toh(uhdr->ver)));
593 	data = uhdr->data;
594 	len = fw->fw_fp->datasize - sizeof(*uhdr);
595 
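	/*
	 * Sketch of the TLV stream parsed below, as implied by this loop
	 * (struct iwm_ucode_tlv): records of the form
	 *
	 *	le32 type; le32 length; uint8_t data[length];
	 *
	 * follow one another, each padded to a 4-byte boundary -- hence
	 * the roundup2(tlv_len, 4) when advancing.
	 */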
596 	while (len >= sizeof(*tlv)) {
597 		len -= sizeof(*tlv);
598 		tlv = (const void *)data;
599 
600 		tlv_len = le32toh(tlv->length);
601 		tlv_type = le32toh(tlv->type);
602 		tlv_data = tlv->data;
603 
604 		if (len < tlv_len) {
605 			device_printf(sc->sc_dev,
606 			    "firmware too short: %zu bytes\n",
607 			    len);
608 			error = EINVAL;
609 			goto parse_out;
610 		}
611 		len -= roundup2(tlv_len, 4);
612 		data += sizeof(*tlv) + roundup2(tlv_len, 4);
613 
614 		switch ((int)tlv_type) {
615 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
616 			if (tlv_len != sizeof(uint32_t)) {
617 				device_printf(sc->sc_dev,
618 				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
619 				    __func__, tlv_len);
620 				error = EINVAL;
621 				goto parse_out;
622 			}
623 			capa->max_probe_length =
624 			    le32_to_cpup((const uint32_t *)tlv_data);
625 			/* limit it to something sensible */
626 			if (capa->max_probe_length >
627 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
628 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
629 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
630 				    "ridiculous\n", __func__);
631 				error = EINVAL;
632 				goto parse_out;
633 			}
634 			break;
635 		case IWM_UCODE_TLV_PAN:
636 			if (tlv_len) {
637 				device_printf(sc->sc_dev,
638 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
639 				    __func__, tlv_len);
640 				error = EINVAL;
641 				goto parse_out;
642 			}
643 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
644 			break;
645 		case IWM_UCODE_TLV_FLAGS:
646 			if (tlv_len < sizeof(uint32_t)) {
647 				device_printf(sc->sc_dev,
648 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
649 				    __func__, tlv_len);
650 				error = EINVAL;
651 				goto parse_out;
652 			}
653 			if (tlv_len % sizeof(uint32_t)) {
654 				device_printf(sc->sc_dev,
655 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
656 				    __func__, tlv_len);
657 				error = EINVAL;
658 				goto parse_out;
659 			}
660 			/*
661 			 * Apparently there can be many flags, but the Linux driver
662 			 * parses only the first one, and so do we.
663 			 *
664 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
665 			 * Intentional or a bug?  Observations from
666 			 * current firmware file:
667 			 *  1) TLV_PAN is parsed first
668 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
669 			 * ==> this resets TLV_PAN to itself... hnnnk
670 			 */
671 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
672 			break;
673 		case IWM_UCODE_TLV_CSCHEME:
674 			if ((error = iwm_store_cscheme(sc,
675 			    tlv_data, tlv_len)) != 0) {
676 				device_printf(sc->sc_dev,
677 				    "%s: iwm_store_cscheme(): returned %d\n",
678 				    __func__, error);
679 				goto parse_out;
680 			}
681 			break;
682 		case IWM_UCODE_TLV_NUM_OF_CPU:
683 			if (tlv_len != sizeof(uint32_t)) {
684 				device_printf(sc->sc_dev,
685 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
686 				    __func__, tlv_len);
687 				error = EINVAL;
688 				goto parse_out;
689 			}
690 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
691 			if (num_of_cpus == 2) {
692 				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
693 					TRUE;
694 				fw->img[IWM_UCODE_INIT].is_dual_cpus =
695 					TRUE;
696 				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
697 					TRUE;
698 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
699 				device_printf(sc->sc_dev,
700 				    "%s: Driver supports only 1 or 2 CPUs\n",
701 				    __func__);
702 				error = EINVAL;
703 				goto parse_out;
704 			}
705 			break;
706 		case IWM_UCODE_TLV_SEC_RT:
707 			if ((error = iwm_firmware_store_section(sc,
708 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
709 				device_printf(sc->sc_dev,
710 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
711 				    __func__, error);
712 				goto parse_out;
713 			}
714 			break;
715 		case IWM_UCODE_TLV_SEC_INIT:
716 			if ((error = iwm_firmware_store_section(sc,
717 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
718 				device_printf(sc->sc_dev,
719 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
720 				    __func__, error);
721 				goto parse_out;
722 			}
723 			break;
724 		case IWM_UCODE_TLV_SEC_WOWLAN:
725 			if ((error = iwm_firmware_store_section(sc,
726 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
727 				device_printf(sc->sc_dev,
728 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
729 				    __func__, error);
730 				goto parse_out;
731 			}
732 			break;
733 		case IWM_UCODE_TLV_DEF_CALIB:
734 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
735 				device_printf(sc->sc_dev,
736 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
737 				    __func__, tlv_len,
738 				    sizeof(struct iwm_tlv_calib_data));
739 				error = EINVAL;
740 				goto parse_out;
741 			}
742 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
743 				device_printf(sc->sc_dev,
744 				    "%s: iwm_set_default_calib() failed: %d\n",
745 				    __func__, error);
746 				goto parse_out;
747 			}
748 			break;
749 		case IWM_UCODE_TLV_PHY_SKU:
750 			if (tlv_len != sizeof(uint32_t)) {
751 				error = EINVAL;
752 				device_printf(sc->sc_dev,
753 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
754 				    __func__, tlv_len);
755 				goto parse_out;
756 			}
757 			sc->sc_fw.phy_config =
758 			    le32_to_cpup((const uint32_t *)tlv_data);
759 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
760 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
761 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
762 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
763 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
764 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
765 			break;
766 
767 		case IWM_UCODE_TLV_API_CHANGES_SET: {
768 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
769 				error = EINVAL;
770 				goto parse_out;
771 			}
772 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
773 				error = EINVAL;
774 				goto parse_out;
775 			}
776 			break;
777 		}
778 
779 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
780 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
781 				error = EINVAL;
782 				goto parse_out;
783 			}
784 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
785 				error = EINVAL;
786 				goto parse_out;
787 			}
788 			break;
789 		}
790 
791 		case IWM_UCODE_TLV_CMD_VERSIONS:
792 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
793 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
794 			/* ignore, not used by current driver */
795 			break;
796 
797 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
798 			if ((error = iwm_firmware_store_section(sc,
799 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
800 			    tlv_len)) != 0)
801 				goto parse_out;
802 			break;
803 
804 		case IWM_UCODE_TLV_PAGING:
805 			if (tlv_len != sizeof(uint32_t)) {
806 				error = EINVAL;
807 				goto parse_out;
808 			}
809 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
810 
811 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
812 			    "%s: Paging: paging enabled (size = %u bytes)\n",
813 			    __func__, paging_mem_size);
814 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
815 				device_printf(sc->sc_dev,
816 					"%s: Paging: driver supports up to %u bytes for paging image\n",
817 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
818 				error = EINVAL;
819 				goto out;
820 			}
821 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
822 				device_printf(sc->sc_dev,
823 				    "%s: Paging: image isn't multiple %u\n",
824 				    __func__, IWM_FW_PAGING_SIZE);
825 				error = EINVAL;
826 				goto out;
827 			}
828 
829 			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
830 			    paging_mem_size;
831 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
832 			sc->sc_fw.img[usniffer_img].paging_mem_size =
833 			    paging_mem_size;
834 			break;
835 
836 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
837 			if (tlv_len != sizeof(uint32_t)) {
838 				error = EINVAL;
839 				goto parse_out;
840 			}
841 			capa->n_scan_channels =
842 			    le32_to_cpup((const uint32_t *)tlv_data);
843 			break;
844 
845 		case IWM_UCODE_TLV_FW_VERSION:
846 			if (tlv_len != sizeof(uint32_t) * 3) {
847 				error = EINVAL;
848 				goto parse_out;
849 			}
850 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
851 			    "%u.%u.%u",
852 			    le32toh(((const uint32_t *)tlv_data)[0]),
853 			    le32toh(((const uint32_t *)tlv_data)[1]),
854 			    le32toh(((const uint32_t *)tlv_data)[2]));
855 			break;
856 
857 		case IWM_UCODE_TLV_FW_MEM_SEG:
858 			break;
859 
860 		default:
861 			device_printf(sc->sc_dev,
862 			    "%s: unknown firmware section %d, abort\n",
863 			    __func__, tlv_type);
864 			error = EINVAL;
865 			goto parse_out;
866 		}
867 	}
868 
869 	KASSERT(error == 0, ("unhandled error"));
870 
871  parse_out:
872 	if (error) {
873 		device_printf(sc->sc_dev, "firmware parse error %d, "
874 		    "section type %d\n", error, tlv_type);
875 	}
876 
877  out:
878 	if (error) {
879 		if (fw->fw_fp != NULL)
880 			iwm_fw_info_free(fw);
881 	}
882 
883 	return error;
884 }
885 
886 /*
887  * DMA resource routines
888  */
889 
890 /* fwmem is used to load firmware onto the card */
891 static int
892 iwm_alloc_fwmem(struct iwm_softc *sc)
893 {
894 	/* Must be aligned on a 16-byte boundary. */
895 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
896 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
897 }
898 
899 /* tx scheduler rings.  not used? */
900 static int
901 iwm_alloc_sched(struct iwm_softc *sc)
902 {
903 	/* TX scheduler rings must be aligned on a 1KB boundary. */
904 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
905 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
906 }
907 
908 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
909 static int
910 iwm_alloc_kw(struct iwm_softc *sc)
911 {
912 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
913 }
914 
915 /* interrupt cause table */
916 static int
917 iwm_alloc_ict(struct iwm_softc *sc)
918 {
919 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
920 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
921 }
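/*
 * The 1<<IWM_ICT_PADDR_SHIFT alignment above lets the table's physical
 * address be programmed as (paddr >> IWM_ICT_PADDR_SHIFT) in
 * IWM_CSR_DRAM_INT_TBL_REG; see iwm_ict_reset() below.
 */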
922 
923 static int
924 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
925 {
926 	bus_size_t size;
927 	size_t descsz;
928 	int count, i, error;
929 
930 	ring->cur = 0;
931 	if (sc->cfg->mqrx_supported) {
932 		count = IWM_RX_MQ_RING_COUNT;
933 		descsz = sizeof(uint64_t);
934 	} else {
935 		count = IWM_RX_LEGACY_RING_COUNT;
936 		descsz = sizeof(uint32_t);
937 	}
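	/*
	 * The two descriptor sizes reflect the hardware: the multi-queue
	 * free-descriptor ring holds full 64-bit buffer addresses, while
	 * the legacy ring holds 32-bit entries (buffer address >> 8,
	 * consistent with the >> 8 base-address programming in
	 * iwm_nic_rx_legacy_init() below).
	 */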
938 
939 	/* Allocate RX descriptors (256-byte aligned). */
940 	size = count * descsz;
941 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
942 	    256);
943 	if (error != 0) {
944 		device_printf(sc->sc_dev,
945 		    "could not allocate RX ring DMA memory\n");
946 		goto fail;
947 	}
948 	ring->desc = ring->free_desc_dma.vaddr;
949 
950 	/* Allocate RX status area (16-byte aligned). */
951 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
952 	    sizeof(*ring->stat), 16);
953 	if (error != 0) {
954 		device_printf(sc->sc_dev,
955 		    "could not allocate RX status DMA memory\n");
956 		goto fail;
957 	}
958 	ring->stat = ring->stat_dma.vaddr;
959 
960 	if (sc->cfg->mqrx_supported) {
961 		size = count * sizeof(uint32_t);
962 		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
963 		    size, 256);
964 		if (error != 0) {
965 			device_printf(sc->sc_dev,
966 			    "could not allocate RX ring DMA memory\n");
967 			goto fail;
968 		}
969 	}
970 
971 	/* Create RX buffer DMA tag. */
972 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
973 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
974 	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
975 	if (error != 0) {
976 		device_printf(sc->sc_dev,
977 		    "%s: could not create RX buf DMA tag, error %d\n",
978 		    __func__, error);
979 		goto fail;
980 	}
981 
982 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
983 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
984 	if (error != 0) {
985 		device_printf(sc->sc_dev,
986 		    "%s: could not create RX buf DMA map, error %d\n",
987 		    __func__, error);
988 		goto fail;
989 	}
990 
991 	/*
992 	 * Allocate and map RX buffers.
993 	 */
994 	for (i = 0; i < count; i++) {
995 		struct iwm_rx_data *data = &ring->data[i];
996 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
997 		if (error != 0) {
998 			device_printf(sc->sc_dev,
999 			    "%s: could not create RX buf DMA map, error %d\n",
1000 			    __func__, error);
1001 			goto fail;
1002 		}
1003 		data->m = NULL;
1004 
1005 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1006 			goto fail;
1007 		}
1008 	}
1009 	return 0;
1010 
1011 fail:	iwm_free_rx_ring(sc, ring);
1012 	return error;
1013 }
1014 
1015 static void
1016 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1017 {
1018 	/* Reset the ring state */
1019 	ring->cur = 0;
1020 
1021 	/*
1022 	 * The hw rx ring index in shared memory must also be cleared,
1023 	 * otherwise the discrepancy can cause reprocessing chaos.
1024 	 */
1025 	if (sc->rxq.stat)
1026 		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1027 }
1028 
1029 static void
1030 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1031 {
1032 	int count, i;
1033 
1034 	iwm_dma_contig_free(&ring->free_desc_dma);
1035 	iwm_dma_contig_free(&ring->stat_dma);
1036 	iwm_dma_contig_free(&ring->used_desc_dma);
1037 
1038 	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
1039 	    IWM_RX_LEGACY_RING_COUNT;
1040 
1041 	for (i = 0; i < count; i++) {
1042 		struct iwm_rx_data *data = &ring->data[i];
1043 
1044 		if (data->m != NULL) {
1045 			bus_dmamap_sync(ring->data_dmat, data->map,
1046 			    BUS_DMASYNC_POSTREAD);
1047 			bus_dmamap_unload(ring->data_dmat, data->map);
1048 			m_freem(data->m);
1049 			data->m = NULL;
1050 		}
1051 		if (data->map != NULL) {
1052 			bus_dmamap_destroy(ring->data_dmat, data->map);
1053 			data->map = NULL;
1054 		}
1055 	}
1056 	if (ring->spare_map != NULL) {
1057 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1058 		ring->spare_map = NULL;
1059 	}
1060 	if (ring->data_dmat != NULL) {
1061 		bus_dma_tag_destroy(ring->data_dmat);
1062 		ring->data_dmat = NULL;
1063 	}
1064 }
1065 
1066 static int
1067 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1068 {
1069 	bus_addr_t paddr;
1070 	bus_size_t size;
1071 	size_t maxsize;
1072 	int nsegments;
1073 	int i, error;
1074 
1075 	ring->qid = qid;
1076 	ring->queued = 0;
1077 	ring->cur = 0;
1078 
1079 	/* Allocate TX descriptors (256-byte aligned). */
1080 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1081 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1082 	if (error != 0) {
1083 		device_printf(sc->sc_dev,
1084 		    "could not allocate TX ring DMA memory\n");
1085 		goto fail;
1086 	}
1087 	ring->desc = ring->desc_dma.vaddr;
1088 
1089 	/*
1090 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1091 	 * to allocate command space for the other rings.
1092 	 */
1093 	if (qid > IWM_CMD_QUEUE)
1094 		return 0;
1095 
1096 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1097 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1098 	if (error != 0) {
1099 		device_printf(sc->sc_dev,
1100 		    "could not allocate TX cmd DMA memory\n");
1101 		goto fail;
1102 	}
1103 	ring->cmd = ring->cmd_dma.vaddr;
1104 
1105 	/* FW commands may require more mapped space than packets. */
1106 	if (qid == IWM_CMD_QUEUE) {
1107 		maxsize = IWM_RBUF_SIZE;
1108 		nsegments = 1;
1109 	} else {
1110 		maxsize = MCLBYTES;
1111 		nsegments = IWM_MAX_SCATTER - 2;
1112 	}
1113 
1114 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1115 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1116 	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1117 	if (error != 0) {
1118 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1119 		goto fail;
1120 	}
1121 
1122 	paddr = ring->cmd_dma.paddr;
1123 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1124 		struct iwm_tx_data *data = &ring->data[i];
1125 
1126 		data->cmd_paddr = paddr;
1127 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1128 		    + offsetof(struct iwm_tx_cmd, scratch);
1129 		paddr += sizeof(struct iwm_device_cmd);
1130 
1131 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1132 		if (error != 0) {
1133 			device_printf(sc->sc_dev,
1134 			    "could not create TX buf DMA map\n");
1135 			goto fail;
1136 		}
1137 	}
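	/*
	 * Slot i's command thus sits at cmd_dma.paddr +
	 * i * sizeof(struct iwm_device_cmd), with scratch_paddr pointing
	 * at the scratch field of the TX command embedded after the
	 * command header; the KASSERT below checks that the walk covered
	 * exactly "size" bytes.
	 */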
1138 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1139 	    ("invalid physical address"));
1140 	return 0;
1141 
1142 fail:	iwm_free_tx_ring(sc, ring);
1143 	return error;
1144 }
1145 
1146 static void
1147 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1148 {
1149 	int i;
1150 
1151 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1152 		struct iwm_tx_data *data = &ring->data[i];
1153 
1154 		if (data->m != NULL) {
1155 			bus_dmamap_sync(ring->data_dmat, data->map,
1156 			    BUS_DMASYNC_POSTWRITE);
1157 			bus_dmamap_unload(ring->data_dmat, data->map);
1158 			m_freem(data->m);
1159 			data->m = NULL;
1160 		}
1161 	}
1162 	/* Clear TX descriptors. */
1163 	memset(ring->desc, 0, ring->desc_dma.size);
1164 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1165 	    BUS_DMASYNC_PREWRITE);
1166 	sc->qfullmsk &= ~(1 << ring->qid);
1167 	ring->queued = 0;
1168 	ring->cur = 0;
1169 
1170 	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1171 		iwm_pcie_clear_cmd_in_flight(sc);
1172 }
1173 
1174 static void
1175 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1176 {
1177 	int i;
1178 
1179 	iwm_dma_contig_free(&ring->desc_dma);
1180 	iwm_dma_contig_free(&ring->cmd_dma);
1181 
1182 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1183 		struct iwm_tx_data *data = &ring->data[i];
1184 
1185 		if (data->m != NULL) {
1186 			bus_dmamap_sync(ring->data_dmat, data->map,
1187 			    BUS_DMASYNC_POSTWRITE);
1188 			bus_dmamap_unload(ring->data_dmat, data->map);
1189 			m_freem(data->m);
1190 			data->m = NULL;
1191 		}
1192 		if (data->map != NULL) {
1193 			bus_dmamap_destroy(ring->data_dmat, data->map);
1194 			data->map = NULL;
1195 		}
1196 	}
1197 	if (ring->data_dmat != NULL) {
1198 		bus_dma_tag_destroy(ring->data_dmat);
1199 		ring->data_dmat = NULL;
1200 	}
1201 }
1202 
1203 /*
1204  * High-level hardware frobbing routines
1205  */
1206 
1207 static void
1208 iwm_enable_interrupts(struct iwm_softc *sc)
1209 {
1210 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1211 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1212 }
1213 
1214 static void
1215 iwm_restore_interrupts(struct iwm_softc *sc)
1216 {
1217 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1218 }
1219 
1220 static void
1221 iwm_disable_interrupts(struct iwm_softc *sc)
1222 {
1223 	/* disable interrupts */
1224 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1225 
1226 	/* acknowledge all interrupts */
1227 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1228 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1229 }
1230 
1231 static void
1232 iwm_ict_reset(struct iwm_softc *sc)
1233 {
1234 	iwm_disable_interrupts(sc);
1235 
1236 	/* Reset ICT table. */
1237 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1238 	sc->ict_cur = 0;
1239 
1240 	/* Set physical address of ICT table (4KB aligned). */
1241 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1242 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1243 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1244 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1245 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1246 
1247 	/* Switch to ICT interrupt mode in driver. */
1248 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1249 
1250 	/* Re-enable interrupts. */
1251 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1252 	iwm_enable_interrupts(sc);
1253 }
1254 
1255 /* iwlwifi pcie/trans.c */
1256 
1257 /*
1258  * Since this hard-resets the device, it's time to actually
1259  * mark the first vap (if any) as having no MAC context.
1260  * It's annoying, but since the driver may be stopped and
1261  * started while active (thanks, OpenBSD port!) we have
1262  * to track this correctly.
1263  */
1264 static void
1265 iwm_stop_device(struct iwm_softc *sc)
1266 {
1267 	struct ieee80211com *ic = &sc->sc_ic;
1268 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1269 	int chnl, qid;
1270 	uint32_t mask = 0;
1271 
1272 	/* tell the device to stop sending interrupts */
1273 	iwm_disable_interrupts(sc);
1274 
1275 	/*
1276 	 * FreeBSD-local: mark the first vap as not-uploaded,
1277 	 * so the next transition through auth/assoc
1278 	 * will correctly populate the MAC context.
1279 	 */
1280 	if (vap) {
1281 		struct iwm_vap *iv = IWM_VAP(vap);
1282 		iv->phy_ctxt = NULL;
1283 		iv->is_uploaded = 0;
1284 	}
1285 	sc->sc_firmware_state = 0;
1286 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1287 
1288 	/* device going down, Stop using ICT table */
1289 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1290 
1291 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1292 
1293 	if (iwm_nic_lock(sc)) {
1294 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1295 
1296 		/* Stop each Tx DMA channel */
1297 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1298 			IWM_WRITE(sc,
1299 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1300 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1301 		}
1302 
1303 		/* Wait for DMA channels to be idle */
1304 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1305 		    5000)) {
1306 			device_printf(sc->sc_dev,
1307 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1308 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1309 		}
1310 		iwm_nic_unlock(sc);
1311 	}
1312 	iwm_pcie_rx_stop(sc);
1313 
1314 	/* Stop RX ring. */
1315 	iwm_reset_rx_ring(sc, &sc->rxq);
1316 
1317 	/* Reset all TX rings. */
1318 	for (qid = 0; qid < nitems(sc->txq); qid++)
1319 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1320 
1321 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1322 		/* Power-down device's busmaster DMA clocks */
1323 		if (iwm_nic_lock(sc)) {
1324 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1325 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1326 			iwm_nic_unlock(sc);
1327 		}
1328 		DELAY(5);
1329 	}
1330 
1331 	/* Make sure (redundant) we've released our request to stay awake */
1332 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1333 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1334 
1335 	/* Stop the device, and put it in low power state */
1336 	iwm_apm_stop(sc);
1337 
1338 	/* stop and reset the on-board processor */
1339 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1340 	DELAY(5000);
1341 
1342 	/*
1343 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1344 	 */
1345 	iwm_disable_interrupts(sc);
1346 
1347 	/*
1348 	 * Even if we stop the HW, we still want the RF kill
1349 	 * interrupt
1350 	 */
1351 	iwm_enable_rfkill_int(sc);
1352 	iwm_check_rfkill(sc);
1353 
1354 	iwm_prepare_card_hw(sc);
1355 }
1356 
1357 /* iwlwifi: mvm/ops.c */
1358 static void
1359 iwm_nic_config(struct iwm_softc *sc)
1360 {
1361 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1362 	uint32_t reg_val = 0;
1363 	uint32_t phy_config = iwm_get_phy_config(sc);
1364 
1365 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1366 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1367 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1368 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1369 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1370 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1371 
1372 	/* SKU control */
1373 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1374 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1375 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1376 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1377 
1378 	/* radio configuration */
1379 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1380 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1381 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1382 
1383 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
1384 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
1385 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
1386 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
1387 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
1388 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
1389 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1390 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
1391 	    reg_val);
1392 
1393 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1394 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1395 	    radio_cfg_step, radio_cfg_dash);
1396 
1397 	/*
1398 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1399 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1400 	 * to lose ownership and be unable to obtain it back.
1401 	 */
1402 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1403 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1404 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1405 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1406 	}
1407 }
1408 
1409 static int
1410 iwm_nic_rx_mq_init(struct iwm_softc *sc)
1411 {
1412 	int enabled;
1413 
1414 	if (!iwm_nic_lock(sc))
1415 		return EBUSY;
1416 
1417 	/* Stop RX DMA. */
1418 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1419 	/* Disable RX used and free queue operation. */
1420 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
1421 
1422 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
1423 	    sc->rxq.free_desc_dma.paddr);
1424 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
1425 	    sc->rxq.used_desc_dma.paddr);
1426 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
1427 	    sc->rxq.stat_dma.paddr);
1428 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
1429 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
1430 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
1431 
1432 	/* We configure only queue 0 for now. */
1433 	enabled = ((1 << 0) << 16) | (1 << 0);
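	/*
	 * Assumed bit layout, matching the queue-0-only value above: the
	 * low 16 bits of IWM_RFH_RXF_RXQ_ACTIVE enable the free-descriptor
	 * queues and the high 16 bits the used-descriptor queues, so
	 * enabling queue N would be ((1 << N) << 16) | (1 << N).
	 */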
1434 
1435 	/* Enable RX DMA, 4KB buffer size. */
1436 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
1437 	    IWM_RFH_DMA_EN_ENABLE_VAL |
1438 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
1439 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
1440 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
1441 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
1442 
1443 	/* Enable RX DMA snooping. */
1444 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
1445 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
1446 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1447 	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1448 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1449 
1450 	/* Enable the configured queue(s). */
1451 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
1452 
1453 	iwm_nic_unlock(sc);
1454 
1455 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1456 
1457 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
1458 
1459 	return (0);
1460 }
1461 
1462 static int
1463 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1464 {
1465 
1466 	/* Stop Rx DMA */
1467 	iwm_pcie_rx_stop(sc);
1468 
1469 	if (!iwm_nic_lock(sc))
1470 		return EBUSY;
1471 
1472 	/* reset and flush pointers */
1473 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1474 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1475 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1476 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1477 
1478 	/* Set physical address of RX ring (256-byte aligned). */
1479 	IWM_WRITE(sc,
1480 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1481 	    sc->rxq.free_desc_dma.paddr >> 8);
1482 
1483 	/* Set physical address of RX status (16-byte aligned). */
1484 	IWM_WRITE(sc,
1485 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1486 
1487 	/* Enable Rx DMA
1488 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1489 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1490 	 *      the credit mechanism in 5000 HW RX FIFO
1491 	 * Direct rx interrupts to hosts
1492 	 * Rx buffer size 4 or 8k or 12k
1493 	 * RB timeout 0x10
1494 	 * 256 RBDs
1495 	 */
1496 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1497 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1498 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1499 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1500 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1501 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1502 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1503 
1504 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1505 
1506 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1507 	if (sc->cfg->host_interrupt_operation_mode)
1508 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1509 
1510 	iwm_nic_unlock(sc);
1511 
1512 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1513 
1514 	return 0;
1515 }
1516 
1517 static int
1518 iwm_nic_rx_init(struct iwm_softc *sc)
1519 {
1520 	if (sc->cfg->mqrx_supported)
1521 		return iwm_nic_rx_mq_init(sc);
1522 	else
1523 		return iwm_nic_rx_legacy_init(sc);
1524 }
1525 
1526 static int
1527 iwm_nic_tx_init(struct iwm_softc *sc)
1528 {
1529 	int qid;
1530 
1531 	if (!iwm_nic_lock(sc))
1532 		return EBUSY;
1533 
1534 	/* Deactivate TX scheduler. */
1535 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1536 
1537 	/* Set physical address of "keep warm" page (16-byte aligned). */
1538 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1539 
1540 	/* Initialize TX rings. */
1541 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1542 		struct iwm_tx_ring *txq = &sc->txq[qid];
1543 
1544 		/* Set physical address of TX ring (256-byte aligned). */
1545 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1546 		    txq->desc_dma.paddr >> 8);
1547 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1548 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1549 		    __func__,
1550 		    qid, txq->desc,
1551 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1552 	}
1553 
1554 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
1555 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
1556 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
1557 
1558 	iwm_nic_unlock(sc);
1559 
1560 	return 0;
1561 }
1562 
1563 static int
1564 iwm_nic_init(struct iwm_softc *sc)
1565 {
1566 	int error;
1567 
1568 	iwm_apm_init(sc);
1569 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1570 		iwm_set_pwr(sc);
1571 
1572 	iwm_nic_config(sc);
1573 
1574 	if ((error = iwm_nic_rx_init(sc)) != 0)
1575 		return error;
1576 
1577 	/*
1578 	 * Ditto for TX, from iwn
1579 	 */
1580 	if ((error = iwm_nic_tx_init(sc)) != 0)
1581 		return error;
1582 
1583 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1584 	    "%s: shadow registers enabled\n", __func__);
1585 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1586 
1587 	return 0;
1588 }
1589 
1590 int
1591 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1592 {
1593 	int qmsk;
1594 
1595 	qmsk = 1 << qid;
1596 
1597 	if (!iwm_nic_lock(sc)) {
1598 		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1599 		    __func__, qid);
1600 		return EBUSY;
1601 	}
1602 
1603 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1604 
1605 	if (qid == IWM_CMD_QUEUE) {
1606 		/* Disable the scheduler. */
1607 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);
1608 
1609 		/* Stop the TX queue prior to configuration. */
1610 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1611 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1612 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1613 
1614 		iwm_nic_unlock(sc);
1615 
1616 		/* Disable aggregations for this queue. */
1617 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);
1618 
1619 		if (!iwm_nic_lock(sc)) {
1620 			device_printf(sc->sc_dev,
1621 			    "%s: cannot enable txq %d\n", __func__, qid);
1622 			return EBUSY;
1623 		}
1624 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1625 		iwm_nic_unlock(sc);
1626 
1627 		iwm_write_mem32(sc,
1628 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1629 		/* Set scheduler window size and frame limit. */
1630 		iwm_write_mem32(sc,
1631 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1632 		    sizeof(uint32_t),
1633 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1634 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1635 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1636 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1637 
1638 		if (!iwm_nic_lock(sc)) {
1639 			device_printf(sc->sc_dev,
1640 			    "%s: cannot enable txq %d\n", __func__, qid);
1641 			return EBUSY;
1642 		}
1643 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1644 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1645 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1646 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1647 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1648 
1649 		/* Enable the scheduler for this queue. */
1650 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
1651 	} else {
1652 		struct iwm_scd_txq_cfg_cmd cmd;
1653 		int error;
1654 
1655 		iwm_nic_unlock(sc);
1656 
1657 		memset(&cmd, 0, sizeof(cmd));
1658 		cmd.scd_queue = qid;
1659 		cmd.enable = 1;
1660 		cmd.sta_id = sta_id;
1661 		cmd.tx_fifo = fifo;
1662 		cmd.aggregate = 0;
1663 		cmd.window = IWM_FRAME_LIMIT;
1664 
1665 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1666 		    sizeof(cmd), &cmd);
1667 		if (error) {
1668 			device_printf(sc->sc_dev,
1669 			    "cannot enable txq %d\n", qid);
1670 			return error;
1671 		}
1672 
1673 		if (!iwm_nic_lock(sc))
1674 			return EBUSY;
1675 	}
1676 
1677 	iwm_nic_unlock(sc);
1678 
1679 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1680 	    __func__, qid, fifo);
1681 
1682 	return 0;
1683 }
1684 
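/*
 * Illustrative usage sketch, not driver code: the command queue is
 * configured directly through scheduler registers above, while any
 * other queue is set up via the IWM_SCD_QUEUE_CFG host command.  The
 * data-queue id/FIFO numbers below are hypothetical placeholders.
 */
#if 0
	/* Command queue on FIFO 7, as done in iwm_trans_pcie_fw_alive(). */
	error = iwm_enable_txq(sc, 0 /* sta_id unused */, IWM_CMD_QUEUE, 7);
	/* A data queue, e.g. qid 1 on FIFO 1 for station 0. */
	if (error == 0)
		error = iwm_enable_txq(sc, 0, 1, 1);
#endif
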
1685 static int
1686 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1687 {
1688 	int error, chnl;
1689 
1690 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1691 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1692 
1693 	if (!iwm_nic_lock(sc))
1694 		return EBUSY;
1695 
1696 	iwm_ict_reset(sc);
1697 
1698 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1699 	if (scd_base_addr != 0 &&
1700 	    scd_base_addr != sc->scd_base_addr) {
1701 		device_printf(sc->sc_dev,
1702 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1703 		    __func__, scd_base_addr, sc->scd_base_addr);
1704 	}
1705 
1706 	iwm_nic_unlock(sc);
1707 
1708 	/* reset context data, TX status and translation data */
1709 	error = iwm_write_mem(sc,
1710 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1711 	    NULL, clear_dwords);
1712 	if (error)
1713 		return EBUSY;
1714 
1715 	if (!iwm_nic_lock(sc))
1716 		return EBUSY;
1717 
1718 	/* Set physical address of TX scheduler rings (1KB aligned). */
1719 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1720 
1721 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1722 
1723 	iwm_nic_unlock(sc);
1724 
1725 	/* enable command channel */
1726 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1727 	if (error)
1728 		return error;
1729 
1730 	if (!iwm_nic_lock(sc))
1731 		return EBUSY;
1732 
1733 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1734 
1735 	/* Enable DMA channels. */
1736 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1737 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1738 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1739 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1740 	}
1741 
1742 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1743 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1744 
1745 	iwm_nic_unlock(sc);
1746 
1747 	/* Enable L1-Active */
1748 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1749 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1750 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1751 	}
1752 
1753 	return error;
1754 }
1755 
1756 /*
1757  * NVM read access and content parsing.  We do not support
1758  * external NVM or writing NVM.
1759  * iwlwifi/mvm/nvm.c
1760  */
1761 
1762 /* Default NVM size to read */
1763 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1764 
1765 #define IWM_NVM_WRITE_OPCODE 1
1766 #define IWM_NVM_READ_OPCODE 0
1767 
1768 /* load nvm chunk response */
1769 enum {
1770 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1771 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1772 };
1773 
1774 static int
1775 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1776 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1777 {
1778 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1779 		.offset = htole16(offset),
1780 		.length = htole16(length),
1781 		.type = htole16(section),
1782 		.op_code = IWM_NVM_READ_OPCODE,
1783 	};
1784 	struct iwm_nvm_access_resp *nvm_resp;
1785 	struct iwm_rx_packet *pkt;
1786 	struct iwm_host_cmd cmd = {
1787 		.id = IWM_NVM_ACCESS_CMD,
1788 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1789 		.data = { &nvm_access_cmd, },
1790 	};
1791 	int ret, bytes_read, offset_read;
1792 	uint8_t *resp_data;
1793 
1794 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1795 
1796 	ret = iwm_send_cmd(sc, &cmd);
1797 	if (ret) {
1798 		device_printf(sc->sc_dev,
1799 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1800 		return ret;
1801 	}
1802 
1803 	pkt = cmd.resp_pkt;
1804 
1805 	/* Extract NVM response */
1806 	nvm_resp = (void *)pkt->data;
1807 	ret = le16toh(nvm_resp->status);
1808 	bytes_read = le16toh(nvm_resp->length);
1809 	offset_read = le16toh(nvm_resp->offset);
1810 	resp_data = nvm_resp->data;
1811 	if (ret) {
1812 		if ((offset != 0) &&
1813 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1814 			/*
1815 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1816 			 * read a chunk at an address that is a multiple of
1817 			 * 2K and got an error because that address is empty.
1818 			 * Meaning of (offset != 0): the driver has already
1819 			 * read valid data from another chunk, so this case
1820 			 * is not an error.
1821 			 */
1822 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1823 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1824 				    offset);
1825 			*len = 0;
1826 			ret = 0;
1827 		} else {
1828 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1829 				    "NVM access command failed with status %d\n", ret);
1830 			ret = EIO;
1831 		}
1832 		goto exit;
1833 	}
1834 
1835 	if (offset_read != offset) {
1836 		device_printf(sc->sc_dev,
1837 		    "NVM ACCESS response with invalid offset %d\n",
1838 		    offset_read);
1839 		ret = EINVAL;
1840 		goto exit;
1841 	}
1842 
1843 	if (bytes_read > length) {
1844 		device_printf(sc->sc_dev,
1845 		    "NVM ACCESS response with too much data "
1846 		    "(%d bytes requested, %d bytes received)\n",
1847 		    length, bytes_read);
1848 		ret = EINVAL;
1849 		goto exit;
1850 	}
1851 
1852 	/* Copy the returned NVM chunk into the caller's buffer. */
1853 	memcpy(data + offset, resp_data, bytes_read);
1854 	*len = bytes_read;
1855 
1856  exit:
1857 	iwm_free_resp(sc, &cmd);
1858 	return ret;
1859 }
1860 
1861 /*
1862  * Reads an NVM section completely.
1863  * NICs prior to the 7000 family don't have a real NVM, but just read
1864  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1865  * by the uCode, we must manually check in this case that we don't
1866  * overflow and try to read more than the EEPROM size.
1867  * For 7000 family NICs, we supply the maximal size we can read, and
1868  * the uCode fills the response with as much data as it can without
1869  * overflowing, so no check is needed.
1870  */
1871 static int
1872 iwm_nvm_read_section(struct iwm_softc *sc,
1873 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1874 {
1875 	uint16_t seglen, length, offset = 0;
1876 	int ret;
1877 
1878 	/* Set nvm section read length */
1879 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1880 
1881 	seglen = length;
1882 
1883 	/* Read the NVM until exhausted (reading less than requested) */
1884 	while (seglen == length) {
1885 		/* Make sure we don't read past the end of the EEPROM buffer. */
1886 		if ((size_read + offset + length) >
1887 		    sc->cfg->eeprom_size) {
1888 			device_printf(sc->sc_dev,
1889 			    "EEPROM size is too small for NVM\n");
1890 			return ENOBUFS;
1891 		}
1892 
1893 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1894 		if (ret) {
1895 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1896 				    "Cannot read NVM from section %d offset %d, length %d\n",
1897 				    section, offset, length);
1898 			return ret;
1899 		}
1900 		offset += seglen;
1901 	}
1902 
1903 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1904 		    "NVM section %d read completed\n", section);
1905 	*len = offset;
1906 	return 0;
1907 }
1908 
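/*
 * Worked example of the chunked read loop above (illustrative sizes):
 * for a 5000-byte section with IWM_NVM_DEFAULT_CHUNK_SIZE = 2048,
 * iwm_nvm_read_chunk() returns seglen values of 2048, 2048 and 904;
 * the short final read (seglen != length) terminates the loop and
 * *len ends up as 5000.
 */
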
1909 /*
1910  * BEGIN IWM_NVM_PARSE
1911  */
1912 
1913 /* iwlwifi/iwl-nvm-parse.c */
1914 
1915 /*
1916  * Translate EEPROM flags to net80211.
1917  */
1918 static uint32_t
1919 iwm_eeprom_channel_flags(uint16_t ch_flags)
1920 {
1921 	uint32_t nflags;
1922 
1923 	nflags = 0;
1924 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1925 		nflags |= IEEE80211_CHAN_PASSIVE;
1926 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1927 		nflags |= IEEE80211_CHAN_NOADHOC;
1928 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1929 		nflags |= IEEE80211_CHAN_DFS;
1930 		/* Just in case. */
1931 		nflags |= IEEE80211_CHAN_NOADHOC;
1932 	}
1933 
1934 	return (nflags);
1935 }
1936 
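/*
 * Worked example of the translation above (illustrative flags): a
 * channel whose NVM flags have IWM_NVM_CHANNEL_ACTIVE and
 * IWM_NVM_CHANNEL_IBSS clear but IWM_NVM_CHANNEL_RADAR set yields
 *
 *	nflags == IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC |
 *	    IEEE80211_CHAN_DFS
 *
 * i.e. a DFS channel that is scanned passively and never used for
 * ad-hoc.
 */
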
1937 static void
1938 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1939     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1940     const uint8_t bands[])
1941 {
1942 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1943 	uint32_t nflags;
1944 	uint16_t ch_flags;
1945 	uint8_t ieee;
1946 	int error;
1947 
1948 	for (; ch_idx < ch_num; ch_idx++) {
1949 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1950 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1951 			ieee = iwm_nvm_channels[ch_idx];
1952 		else
1953 			ieee = iwm_nvm_channels_8000[ch_idx];
1954 
1955 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1956 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1957 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1958 			    ieee, ch_flags,
1959 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1960 			    "5.2" : "2.4");
1961 			continue;
1962 		}
1963 
1964 		nflags = iwm_eeprom_channel_flags(ch_flags);
1965 		error = ieee80211_add_channel(chans, maxchans, nchans,
1966 		    ieee, 0, 0, nflags, bands);
1967 		if (error != 0)
1968 			break;
1969 
1970 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1971 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1972 		    ieee, ch_flags,
1973 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1974 		    "5.2" : "2.4");
1975 	}
1976 }
1977 
1978 static void
1979 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1980     struct ieee80211_channel chans[])
1981 {
1982 	struct iwm_softc *sc = ic->ic_softc;
1983 	struct iwm_nvm_data *data = sc->nvm_data;
1984 	uint8_t bands[IEEE80211_MODE_BYTES];
1985 	size_t ch_num;
1986 
1987 	memset(bands, 0, sizeof(bands));
1988 	/* 1-13: 11b/g channels. */
1989 	setbit(bands, IEEE80211_MODE_11B);
1990 	setbit(bands, IEEE80211_MODE_11G);
1991 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1992 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
1993 
1994 	/* 14: 11b channel only. */
1995 	clrbit(bands, IEEE80211_MODE_11G);
1996 	iwm_add_channel_band(sc, chans, maxchans, nchans,
1997 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1998 
1999 	if (data->sku_cap_band_52GHz_enable) {
2000 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2001 			ch_num = nitems(iwm_nvm_channels);
2002 		else
2003 			ch_num = nitems(iwm_nvm_channels_8000);
2004 		memset(bands, 0, sizeof(bands));
2005 		setbit(bands, IEEE80211_MODE_11A);
2006 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2007 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2008 	}
2009 }
2010 
2011 static void
2012 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2013 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2014 {
2015 	const uint8_t *hw_addr;
2016 
2017 	if (mac_override) {
2018 		static const uint8_t reserved_mac[] = {
2019 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2020 		};
2021 
2022 		hw_addr = (const uint8_t *)(mac_override +
2023 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2024 
2025 		/*
2026 		 * Store the MAC address from the MAO (MAC address
2027 		 * override) section; no byte swapping is required there.
2028 		 */
2029 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2030 
2031 		/*
2032 		 * Force the use of the OTP MAC address in case of reserved MAC
2033 		 * address in the NVM, or if address is given but invalid.
2034 		 */
2035 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2036 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2037 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2038 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2039 			return;
2040 
2041 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2042 		    "%s: mac address from nvm override section invalid\n",
2043 		    __func__);
2044 	}
2045 
2046 	if (nvm_hw) {
2047 		/* read the mac address from WFMP registers */
2048 		uint32_t mac_addr0 =
2049 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2050 		uint32_t mac_addr1 =
2051 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2052 
2053 		hw_addr = (const uint8_t *)&mac_addr0;
2054 		data->hw_addr[0] = hw_addr[3];
2055 		data->hw_addr[1] = hw_addr[2];
2056 		data->hw_addr[2] = hw_addr[1];
2057 		data->hw_addr[3] = hw_addr[0];
2058 
2059 		hw_addr = (const uint8_t *)&mac_addr1;
2060 		data->hw_addr[4] = hw_addr[1];
2061 		data->hw_addr[5] = hw_addr[0];
2062 
2063 		return;
2064 	}
2065 
2066 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2067 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2068 }
2069 
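/*
 * Worked example of the WFMP unpacking above (illustrative register
 * values): with IWM_WFMP_MAC_ADDR_0 reading 0x33221100 and
 * IWM_WFMP_MAC_ADDR_1 reading 0x00005544, the htole32() stores give
 * the little-endian byte sequences 00:11:22:33 and 44:55:00:00, and
 * the reversed copies above produce the address 33:22:11:00:55:44.
 */
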
2070 static int
2071 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2072 	    const uint16_t *phy_sku)
2073 {
2074 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2075 		return le16_to_cpup(nvm_sw + IWM_SKU);
2076 
2077 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2078 }
2079 
2080 static int
2081 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2082 {
2083 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2084 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2085 	else
2086 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2087 						IWM_NVM_VERSION_8000));
2088 }
2089 
2090 static int
2091 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2092 		  const uint16_t *phy_sku)
2093 {
2094 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2095 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2096 
2097 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2098 }
2099 
2100 static int
2101 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2102 {
2103 	int n_hw_addr;
2104 
2105 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2106 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2107 
2108 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2109 
2110 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2111 }
2112 
2113 static void
2114 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2115 		  uint32_t radio_cfg)
2116 {
2117 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2118 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2119 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2120 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2121 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2122 		return;
2123 	}
2124 
2125 	/* set the radio configuration for family 8000 */
2126 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2127 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2128 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2129 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2130 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2131 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2132 }
2133 
2134 static int
2135 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2136 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2137 {
2138 #ifdef notyet /* for FAMILY 9000 */
2139 	if (cfg->mac_addr_from_csr) {
2140 		iwm_set_hw_address_from_csr(sc, data);
2141 	} else
2142 #endif
2143 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2144 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2145 
2146 		/* The byte order is little-endian 16-bit, meaning 21:43:65. */
2147 		data->hw_addr[0] = hw_addr[1];
2148 		data->hw_addr[1] = hw_addr[0];
2149 		data->hw_addr[2] = hw_addr[3];
2150 		data->hw_addr[3] = hw_addr[2];
2151 		data->hw_addr[4] = hw_addr[5];
2152 		data->hw_addr[5] = hw_addr[4];
2153 	} else {
2154 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2155 	}
2156 
2157 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2158 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2159 		return EINVAL;
2160 	}
2161 
2162 	return 0;
2163 }
2164 
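/*
 * A minimal standalone sketch of the pre-8000 swizzle above, assuming
 * a 6-byte NVM byte array (names are hypothetical, not driver code):
 * each little-endian 16-bit word has its bytes swapped, so NVM bytes
 * 21:43:65 come out as the MAC address 12:34:56.
 */
#if 0
static void
example_swizzle_mac(const uint8_t nvm[6], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 6; i += 2) {
		mac[i] = nvm[i + 1];	/* high byte of the LE word */
		mac[i + 1] = nvm[i];	/* low byte of the LE word */
	}
}
#endif
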
2165 static struct iwm_nvm_data *
2166 iwm_parse_nvm_data(struct iwm_softc *sc,
2167 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2168 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2169 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2170 {
2171 	struct iwm_nvm_data *data;
2172 	uint32_t sku, radio_cfg;
2173 	uint16_t lar_config;
2174 
2175 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2176 		data = malloc(sizeof(*data) +
2177 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2178 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2179 	} else {
2180 		data = malloc(sizeof(*data) +
2181 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2182 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2183 	}
2184 	if (!data)
2185 		return NULL;
2186 
2187 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2188 
2189 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2190 	iwm_set_radio_cfg(sc, data, radio_cfg);
2191 
2192 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2193 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2194 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2195 	data->sku_cap_11n_enable = 0;
2196 
2197 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2198 
2199 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2200 		/* TODO: use IWL_NVM_EXT */
2201 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2202 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2203 				       IWM_NVM_LAR_OFFSET_8000;
2204 
2205 		lar_config = le16_to_cpup(regulatory + lar_offset);
2206 		data->lar_enabled = !!(lar_config &
2207 				       IWM_NVM_LAR_ENABLED_8000);
2208 	}
2209 
2210 	/* If no valid MAC address was found, bail out. */
2211 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2212 		free(data, M_DEVBUF);
2213 		return NULL;
2214 	}
2215 
2216 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2217 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2218 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2219 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2220 	} else {
2221 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2222 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2223 	}
2224 
2225 	return data;
2226 }
2227 
2228 static void
2229 iwm_free_nvm_data(struct iwm_nvm_data *data)
2230 {
2231 	if (data != NULL)
2232 		free(data, M_DEVBUF);
2233 }
2234 
2235 static struct iwm_nvm_data *
2236 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2237 {
2238 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2239 
2240 	/* Checking for required sections */
2241 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2242 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2243 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2244 			device_printf(sc->sc_dev,
2245 			    "Can't parse empty OTP/NVM sections\n");
2246 			return NULL;
2247 		}
2248 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2249 		/* SW and REGULATORY sections are mandatory */
2250 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2251 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2252 			device_printf(sc->sc_dev,
2253 			    "Can't parse empty OTP/NVM sections\n");
2254 			return NULL;
2255 		}
2256 		/* MAC_OVERRIDE or at least HW section must exist */
2257 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2258 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2259 			device_printf(sc->sc_dev,
2260 			    "Can't parse mac_address, empty sections\n");
2261 			return NULL;
2262 		}
2263 
2264 		/* PHY_SKU section is mandatory in B0 */
2265 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2266 			device_printf(sc->sc_dev,
2267 			    "Can't parse phy_sku in B0, empty sections\n");
2268 			return NULL;
2269 		}
2270 	} else {
2271 		panic("unknown device family %d", sc->cfg->device_family);
2272 	}
2273 
2274 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2275 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2276 	calib = (const uint16_t *)
2277 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2278 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2279 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2280 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2281 	mac_override = (const uint16_t *)
2282 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2283 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2284 
2285 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2286 	    phy_sku, regulatory);
2287 }
2288 
2289 static int
2290 iwm_nvm_init(struct iwm_softc *sc)
2291 {
2292 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2293 	int i, ret, section;
2294 	uint32_t size_read = 0;
2295 	uint8_t *nvm_buffer, *temp;
2296 	uint16_t len;
2297 
2298 	memset(nvm_sections, 0, sizeof(nvm_sections));
2299 
2300 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
2301 		return EINVAL;
2302 
2303 	/* Load NVM values from the NIC (read through the firmware). */
2305 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2306 
2307 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2308 	if (!nvm_buffer)
2309 		return ENOMEM;
2310 	for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
2311 		/* Read this section into the scratch buffer. */
2312 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2313 					   &len, size_read);
2314 		if (ret)
2315 			continue;
2316 		size_read += len;
2317 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2318 		if (!temp) {
2319 			ret = ENOMEM;
2320 			break;
2321 		}
2322 		memcpy(temp, nvm_buffer, len);
2323 
2324 		nvm_sections[section].data = temp;
2325 		nvm_sections[section].length = len;
2326 	}
2327 	if (!size_read)
2328 		device_printf(sc->sc_dev, "OTP is blank\n");
2329 	free(nvm_buffer, M_DEVBUF);
2330 
2331 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2332 	if (!sc->nvm_data)
2333 		return EINVAL;
2334 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2335 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2336 
2337 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2338 		if (nvm_sections[i].data != NULL)
2339 			free(nvm_sections[i].data, M_DEVBUF);
2340 	}
2341 
2342 	return 0;
2343 }
2344 
2345 static int
2346 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2347 	const struct iwm_fw_desc *section)
2348 {
2349 	struct iwm_dma_info *dma = &sc->fw_dma;
2350 	uint8_t *v_addr;
2351 	bus_addr_t p_addr;
2352 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2353 	int ret = 0;
2354 
2355 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2356 		    "%s: [%d] uCode section being loaded...\n",
2357 		    __func__, section_num);
2358 
2359 	v_addr = dma->vaddr;
2360 	p_addr = dma->paddr;
2361 
2362 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2363 		uint32_t copy_size, dst_addr;
2364 		int extended_addr = FALSE;
2365 
2366 		copy_size = MIN(chunk_sz, section->len - offset);
2367 		dst_addr = section->offset + offset;
2368 
2369 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2370 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2371 			extended_addr = TRUE;
2372 
2373 		if (extended_addr)
2374 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2375 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2376 
2377 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2378 		    copy_size);
2379 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2380 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2381 						   copy_size);
2382 
2383 		if (extended_addr)
2384 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2385 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2386 
2387 		if (ret) {
2388 			device_printf(sc->sc_dev,
2389 			    "%s: Could not load the [%d] uCode section\n",
2390 			    __func__, section_num);
2391 			break;
2392 		}
2393 	}
2394 
2395 	return ret;
2396 }
2397 
2398 /*
2399  * ucode
2400  */
2401 static int
2402 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2403 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2404 {
2405 	sc->sc_fw_chunk_done = 0;
2406 
2407 	if (!iwm_nic_lock(sc))
2408 		return EBUSY;
2409 
2410 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2411 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2412 
2413 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2414 	    dst_addr);
2415 
2416 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2417 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2418 
2419 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2420 	    (iwm_get_dma_hi_addr(phy_addr)
2421 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2422 
2423 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2424 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2425 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2426 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2427 
2428 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2429 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2430 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2431 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2432 
2433 	iwm_nic_unlock(sc);
2434 
2435 	/* wait up to 5s for this segment to load */
2436 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2437 
2438 	if (!sc->sc_fw_chunk_done) {
2439 		device_printf(sc->sc_dev,
2440 		    "fw chunk addr 0x%x len %d failed to load\n",
2441 		    dst_addr, byte_cnt);
2442 		return ETIMEDOUT;
2443 	}
2444 
2445 	return 0;
2446 }
2447 
2448 static int
2449 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2450 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2451 {
2452 	int shift_param;
2453 	int i, ret = 0, sec_num = 0x1;
2454 	uint32_t val, last_read_idx = 0;
2455 
2456 	if (cpu == 1) {
2457 		shift_param = 0;
2458 		*first_ucode_section = 0;
2459 	} else {
2460 		shift_param = 16;
2461 		(*first_ucode_section)++;
2462 	}
2463 
2464 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2465 		last_read_idx = i;
2466 
2467 		/*
2468 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter: separates the
2469 		 * CPU1 sections from the CPU2 sections.
2470 		 * PAGING_SEPARATOR_SECTION delimiter: separates the CPU2
2471 		 * non-paged sections from the CPU2 paging sections.
2472 		 */
2473 		if (!image->sec[i].data ||
2474 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2475 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2476 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2477 				    "Break since data is not valid or the section is empty, sec = %d\n",
2478 				    i);
2479 			break;
2480 		}
2481 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2482 		if (ret)
2483 			return ret;
2484 
2485 		/* Notify the ucode of the loaded section number and status */
2486 		if (iwm_nic_lock(sc)) {
2487 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2488 			val = val | (sec_num << shift_param);
2489 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2490 			sec_num = (sec_num << 1) | 0x1;
2491 			iwm_nic_unlock(sc);
2492 		}
2493 	}
2494 
2495 	*first_ucode_section = last_read_idx;
2496 
2497 	iwm_enable_interrupts(sc);
2498 
2499 	if (iwm_nic_lock(sc)) {
2500 		if (cpu == 1)
2501 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2502 		else
2503 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2504 		iwm_nic_unlock(sc);
2505 	}
2506 
2507 	return 0;
2508 }
2509 
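/*
 * Worked example of the load-status handshake above: sec_num starts
 * at 0x1 and is advanced as (sec_num << 1) | 0x1, so after loading
 * sections 0, 1 and 2 the values OR'd into IWM_FH_UCODE_LOAD_STATUS
 * are 0x1, 0x3 and 0x7 (shifted left by 16 for CPU2).  The final
 * 0xFFFF / 0xFFFFFFFF write marks the CPU as fully loaded.
 */
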
2510 static int
2511 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2512 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2513 {
2514 	int i, ret = 0;
2515 	uint32_t last_read_idx = 0;
2516 
2517 	if (cpu == 1) {
2518 		*first_ucode_section = 0;
2519 	} else {
2520 		(*first_ucode_section)++;
2521 	}
2522 
2523 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2524 		last_read_idx = i;
2525 
2526 		/*
2527 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter: separates the
2528 		 * CPU1 sections from the CPU2 sections.
2529 		 * PAGING_SEPARATOR_SECTION delimiter: separates the CPU2
2530 		 * non-paged sections from the CPU2 paging sections.
2531 		 */
2532 		if (!image->sec[i].data ||
2533 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2534 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2535 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2536 				    "Break since data is not valid or the section is empty, sec = %d\n",
2537 				    i);
2538 			break;
2539 		}
2540 
2541 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2542 		if (ret)
2543 			return ret;
2544 	}
2545 
2546 	*first_ucode_section = last_read_idx;
2547 
2548 	return 0;
2550 }
2551 
2552 static int
2553 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2554 {
2555 	int ret = 0;
2556 	int first_ucode_section;
2557 
2558 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2559 		     image->is_dual_cpus ? "Dual" : "Single");
2560 
2561 	/* load to FW the binary non secured sections of CPU1 */
2562 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2563 	if (ret)
2564 		return ret;
2565 
2566 	if (image->is_dual_cpus) {
2567 		/* set CPU2 header address */
2568 		if (iwm_nic_lock(sc)) {
2569 			iwm_write_prph(sc,
2570 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2571 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2572 			iwm_nic_unlock(sc);
2573 		}
2574 
2575 		/* load to FW the binary sections of CPU2 */
2576 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2577 						 &first_ucode_section);
2578 		if (ret)
2579 			return ret;
2580 	}
2581 
2582 	iwm_enable_interrupts(sc);
2583 
2584 	/* release CPU reset */
2585 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2586 
2587 	return 0;
2588 }
2589 
2590 int
2591 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2592 	const struct iwm_fw_img *image)
2593 {
2594 	int ret = 0;
2595 	int first_ucode_section;
2596 
2597 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2598 		    image->is_dual_cpus ? "Dual" : "Single");
2599 
2600 	/* configure the ucode to be ready to get the secured image */
2601 	/* release CPU reset */
2602 	if (iwm_nic_lock(sc)) {
2603 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2604 		    IWM_RELEASE_CPU_RESET_BIT);
2605 		iwm_nic_unlock(sc);
2606 	}
2607 
2608 	/* load to FW the binary Secured sections of CPU1 */
2609 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2610 	    &first_ucode_section);
2611 	if (ret)
2612 		return ret;
2613 
2614 	/* load to FW the binary sections of CPU2 */
2615 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2616 	    &first_ucode_section);
2617 }
2618 
2619 /* XXX Get rid of this definition */
2620 static inline void
2621 iwm_enable_fw_load_int(struct iwm_softc *sc)
2622 {
2623 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2624 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2625 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2626 }
2627 
2628 /* XXX Add proper rfkill support code */
2629 static int
2630 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2631 {
2632 	int ret;
2633 
2634 	/* This may fail if AMT took ownership of the device */
2635 	if (iwm_prepare_card_hw(sc)) {
2636 		device_printf(sc->sc_dev,
2637 		    "%s: Exit HW not ready\n", __func__);
2638 		ret = EIO;
2639 		goto out;
2640 	}
2641 
2642 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2643 
2644 	iwm_disable_interrupts(sc);
2645 
2646 	/* make sure rfkill handshake bits are cleared */
2647 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2648 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2649 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2650 
2651 	/* clear (again), then enable host interrupts */
2652 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2653 
2654 	ret = iwm_nic_init(sc);
2655 	if (ret) {
2656 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2657 		goto out;
2658 	}
2659 
2660 	/*
2661 	 * Now we load the firmware and don't want to be interrupted, even
2662 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2663 	 * FH_TX interrupt, which is needed to load the firmware). If the
2664 	 * RF-Kill switch is toggled, we will find out after having loaded
2665 	 * the firmware and return the proper value to the caller.
2666 	 */
2667 	iwm_enable_fw_load_int(sc);
2668 
2669 	/* really make sure rfkill handshake bits are cleared */
2670 	/* maybe we should write a few times more?  just to make sure */
2671 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2672 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2673 
2674 	/* Load the given image to the HW */
2675 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2676 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2677 	else
2678 		ret = iwm_pcie_load_given_ucode(sc, fw);
2679 
2680 	/* XXX re-check RF-Kill state */
2681 
2682 out:
2683 	return ret;
2684 }
2685 
2686 static int
2687 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2688 {
2689 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2690 		.valid = htole32(valid_tx_ant),
2691 	};
2692 
2693 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2694 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2695 }
2696 
2697 /* iwlwifi: mvm/fw.c */
2698 static int
2699 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2700 {
2701 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2702 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2703 
2704 	/* Set parameters */
2705 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2706 	phy_cfg_cmd.calib_control.event_trigger =
2707 	    sc->sc_default_calib[ucode_type].event_trigger;
2708 	phy_cfg_cmd.calib_control.flow_trigger =
2709 	    sc->sc_default_calib[ucode_type].flow_trigger;
2710 
2711 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2712 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2713 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2714 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2715 }
2716 
2717 static int
2718 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2719 {
2720 	struct iwm_alive_data *alive_data = data;
2721 	struct iwm_alive_resp_v3 *palive3;
2722 	struct iwm_alive_resp *palive;
2723 	struct iwm_umac_alive *umac;
2724 	struct iwm_lmac_alive *lmac1;
2725 	struct iwm_lmac_alive *lmac2 = NULL;
2726 	uint16_t status;
2727 
2728 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2729 		palive = (void *)pkt->data;
2730 		umac = &palive->umac_data;
2731 		lmac1 = &palive->lmac_data[0];
2732 		lmac2 = &palive->lmac_data[1];
2733 		status = le16toh(palive->status);
2734 	} else {
2735 		palive3 = (void *)pkt->data;
2736 		umac = &palive3->umac_data;
2737 		lmac1 = &palive3->lmac_data;
2738 		status = le16toh(palive3->status);
2739 	}
2740 
2741 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2742 	if (lmac2)
2743 		sc->error_event_table[1] =
2744 			le32toh(lmac2->error_event_table_ptr);
2745 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2746 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2747 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2748 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2749 	if (sc->umac_error_event_table)
2750 		sc->support_umac_log = TRUE;
2751 
2752 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2753 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2754 		    status, lmac1->ver_type, lmac1->ver_subtype);
2755 
2756 	if (lmac2)
2757 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2758 
2759 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2760 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2761 		    le32toh(umac->umac_major),
2762 		    le32toh(umac->umac_minor));
2763 
2764 	return TRUE;
2765 }
2766 
2767 static int
2768 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2769 	struct iwm_rx_packet *pkt, void *data)
2770 {
2771 	struct iwm_phy_db *phy_db = data;
2772 
2773 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2774 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2775 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2776 			    __func__, pkt->hdr.code);
2777 		}
2778 		return TRUE;
2779 	}
2780 
2781 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2782 		device_printf(sc->sc_dev,
2783 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2784 	}
2785 
2786 	return FALSE;
2787 }
2788 
2789 static int
2790 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2791 	enum iwm_ucode_type ucode_type)
2792 {
2793 	struct iwm_notification_wait alive_wait;
2794 	struct iwm_alive_data alive_data;
2795 	const struct iwm_fw_img *fw;
2796 	enum iwm_ucode_type old_type = sc->cur_ucode;
2797 	int error;
2798 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2799 
2800 	fw = &sc->sc_fw.img[ucode_type];
2801 	sc->cur_ucode = ucode_type;
2802 	sc->ucode_loaded = FALSE;
2803 
2804 	memset(&alive_data, 0, sizeof(alive_data));
2805 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2806 				   alive_cmd, nitems(alive_cmd),
2807 				   iwm_alive_fn, &alive_data);
2808 
2809 	error = iwm_start_fw(sc, fw);
2810 	if (error) {
2811 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2812 		sc->cur_ucode = old_type;
2813 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2814 		return error;
2815 	}
2816 
2817 	/*
2818 	 * Some things may run in the background now, but we
2819 	 * just wait for the ALIVE notification here.
2820 	 */
2821 	IWM_UNLOCK(sc);
2822 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2823 				      IWM_UCODE_ALIVE_TIMEOUT);
2824 	IWM_LOCK(sc);
2825 	if (error) {
2826 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2827 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2828 			if (iwm_nic_lock(sc)) {
2829 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2830 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2831 				iwm_nic_unlock(sc);
2832 			}
2833 			device_printf(sc->sc_dev,
2834 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2835 			    a, b);
2836 		}
2837 		sc->cur_ucode = old_type;
2838 		return error;
2839 	}
2840 
2841 	if (!alive_data.valid) {
2842 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2843 		    __func__);
2844 		sc->cur_ucode = old_type;
2845 		return EIO;
2846 	}
2847 
2848 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2849 
2850 	/*
2851 	 * Configure and operate the firmware paging mechanism.
2852 	 * The driver configures the paging flow only once; the CPU2
2853 	 * paging image is included in the IWM_UCODE_INIT image.
2854 	 */
2855 	if (fw->paging_mem_size) {
2856 		error = iwm_save_fw_paging(sc, fw);
2857 		if (error) {
2858 			device_printf(sc->sc_dev,
2859 			    "%s: failed to save the FW paging image\n",
2860 			    __func__);
2861 			return error;
2862 		}
2863 
2864 		error = iwm_send_paging_cmd(sc, fw);
2865 		if (error) {
2866 			device_printf(sc->sc_dev,
2867 			    "%s: failed to send the paging cmd\n", __func__);
2868 			iwm_free_fw_paging(sc);
2869 			return error;
2870 		}
2871 	}
2872 
2873 	if (!error)
2874 		sc->ucode_loaded = TRUE;
2875 	return error;
2876 }
2877 
2878 /*
2879  * mvm misc bits
2880  */
2881 
2882 /*
2883  * follows iwlwifi/fw.c
2884  */
2885 static int
2886 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2887 {
2888 	struct iwm_notification_wait calib_wait;
2889 	static const uint16_t init_complete[] = {
2890 		IWM_INIT_COMPLETE_NOTIF,
2891 		IWM_CALIB_RES_NOTIF_PHY_DB
2892 	};
2893 	int ret;
2894 
2895 	/* do not operate with rfkill switch turned on */
2896 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2897 		device_printf(sc->sc_dev,
2898 		    "radio is disabled by hardware switch\n");
2899 		return EPERM;
2900 	}
2901 
2902 	iwm_init_notification_wait(sc->sc_notif_wait,
2903 				   &calib_wait,
2904 				   init_complete,
2905 				   nitems(init_complete),
2906 				   iwm_wait_phy_db_entry,
2907 				   sc->sc_phy_db);
2908 
2909 	/* Will also start the device */
2910 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2911 	if (ret) {
2912 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2913 		    ret);
2914 		goto error;
2915 	}
2916 
2917 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2918 		ret = iwm_send_bt_init_conf(sc);
2919 		if (ret) {
2920 			device_printf(sc->sc_dev,
2921 			    "failed to send bt coex configuration: %d\n", ret);
2922 			goto error;
2923 		}
2924 	}
2925 
2926 	if (justnvm) {
2927 		/* Read nvm */
2928 		ret = iwm_nvm_init(sc);
2929 		if (ret) {
2930 			device_printf(sc->sc_dev, "failed to read nvm\n");
2931 			goto error;
2932 		}
2933 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2934 		goto error;
2935 	}
2936 
2937 	/* Send TX valid antennas before triggering calibrations */
2938 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
2939 	if (ret) {
2940 		device_printf(sc->sc_dev,
2941 		    "failed to send antennas before calibration: %d\n", ret);
2942 		goto error;
2943 	}
2944 
2945 	/*
2946 	 * Send the phy configuration command to the init uCode to
2947 	 * start the 16.0 uCode init image's internal calibrations.
2948 	 */
2949 	ret = iwm_send_phy_cfg_cmd(sc);
2950 	if (ret) {
2951 		device_printf(sc->sc_dev,
2952 		    "%s: Failed to run INIT calibrations: %d\n",
2953 		    __func__, ret);
2954 		goto error;
2955 	}
2956 
2957 	/*
2958 	 * Nothing to do but wait for the init complete notification
2959 	 * from the firmware.
2960 	 */
2961 	IWM_UNLOCK(sc);
2962 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2963 	    IWM_UCODE_CALIB_TIMEOUT);
2964 	IWM_LOCK(sc);
2965 
2967 	goto out;
2968 
2969 error:
2970 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2971 out:
2972 	return ret;
2973 }
2974 
2975 static int
2976 iwm_config_ltr(struct iwm_softc *sc)
2977 {
2978 	struct iwm_ltr_config_cmd cmd = {
2979 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
2980 	};
2981 
2982 	if (!sc->sc_ltr_enabled)
2983 		return 0;
2984 
2985 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
2986 }
2987 
2988 /*
2989  * receive side
2990  */
2991 
2992 /* (re)stock rx ring, called at init-time and at runtime */
2993 static int
2994 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2995 {
2996 	struct iwm_rx_ring *ring = &sc->rxq;
2997 	struct iwm_rx_data *data = &ring->data[idx];
2998 	struct mbuf *m;
2999 	bus_dmamap_t dmamap;
3000 	bus_dma_segment_t seg;
3001 	int nsegs, error;
3002 
3003 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3004 	if (m == NULL)
3005 		return ENOBUFS;
3006 
3007 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3008 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3009 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3010 	if (error != 0) {
3011 		device_printf(sc->sc_dev,
3012 		    "%s: can't map mbuf, error %d\n", __func__, error);
3013 		m_freem(m);
3014 		return error;
3015 	}
3016 
3017 	if (data->m != NULL)
3018 		bus_dmamap_unload(ring->data_dmat, data->map);
3019 
3020 	/* Swap ring->spare_map with data->map */
3021 	dmamap = data->map;
3022 	data->map = ring->spare_map;
3023 	ring->spare_map = dmamap;
3024 
3025 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3026 	data->m = m;
3027 
3028 	/* Update RX descriptor. */
3029 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3030 	if (sc->cfg->mqrx_supported)
3031 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3032 	else
3033 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3034 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3035 	    BUS_DMASYNC_PREWRITE);
3036 
3037 	return 0;
3038 }
3039 
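/*
 * Worked example of the descriptor encodings above (illustrative
 * address): for a receive buffer at physical address 0x1234500, an
 * MQ-RX device stores the full 64-bit address 0x1234500, while a
 * legacy device stores 0x1234500 >> 8 = 0x12345 in a 32-bit slot.
 * The KASSERT guarantees the low 8 bits are zero, so the shift loses
 * no information.
 */
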
3040 static void
3041 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3042 {
3043 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3044 
3045 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3046 
3047 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3048 }
3049 
3050 /*
3051  * Retrieve the average noise (in dBm) among receivers.
3052  */
3053 static int
3054 iwm_get_noise(struct iwm_softc *sc,
3055     const struct iwm_statistics_rx_non_phy *stats)
3056 {
3057 	int i, noise;
3058 #ifdef IWM_DEBUG
3059 	int nbant, total;
3060 #else
3061 	int nbant __unused, total __unused;
3062 #endif
3063 
3064 	total = nbant = noise = 0;
3065 	for (i = 0; i < 3; i++) {
3066 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3067 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3068 		    __func__,
3069 		    i,
3070 		    noise);
3071 
3072 		if (noise) {
3073 			total += noise;
3074 			nbant++;
3075 		}
3076 	}
3077 
3078 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3079 	    __func__, nbant, total);
3080 #if 0
3081 	/* There should be at least one antenna but check anyway. */
3082 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3083 #else
3084 	/* For now, just hard-code it to -96 to be safe */
3085 	return (-96);
3086 #endif
3087 }
3088 
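/*
 * Worked example of the disabled average above (illustrative
 * numbers): beacon silence RSSI samples of 45, 50 and 0 (one antenna
 * absent) give nbant = 2 and total = 95, so the commented-out
 * formula would return 95 / 2 - 107 = -60 dBm, or -127 dBm with no
 * antennas reporting.  The driver currently returns -96 regardless.
 */
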
3089 static void
3090 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3091 {
3092 	struct iwm_notif_statistics *stats = (void *)&pkt->data;
3093 
3094 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3095 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3096 }
3097 
3098 /* iwlwifi: mvm/rx.c */
3099 /*
3100  * iwm_rx_get_signal_strength - use the new RX PHY INFO API.
3101  * Values are reported by the fw as positive; they must be negated to
3102  * obtain dBm.  Account for missing antennas by replacing 0 values
3103  * with -256 dBm: practically zero power and a non-feasible 8-bit value.
3104  */
3105 static int
3106 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3107     struct iwm_rx_phy_info *phy_info)
3108 {
3109 	int energy_a, energy_b, energy_c, max_energy;
3110 	uint32_t val;
3111 
3112 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3113 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3114 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3115 	energy_a = energy_a ? -energy_a : -256;
3116 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3117 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3118 	energy_b = energy_b ? -energy_b : -256;
3119 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3120 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3121 	energy_c = energy_c ? -energy_c : -256;
3122 	max_energy = MAX(energy_a, energy_b);
3123 	max_energy = MAX(max_energy, energy_c);
3124 
3125 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3126 	    "energy In A %d B %d C %d , and max %d\n",
3127 	    energy_a, energy_b, energy_c, max_energy);
3128 
3129 	return max_energy;
3130 }
3131 
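/*
 * Worked example of the negation above (illustrative energies):
 * energy readings A = 50, B = 0 and C = 38 become -50, -256 and
 * -38 dBm respectively (the zero marks a missing antenna), so the
 * function returns MAX(-50, -256, -38) = -38.
 */
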
3132 static int
3133 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3134     struct iwm_rx_mpdu_desc *desc)
3135 {
3136 	int energy_a, energy_b;
3137 
3138 	energy_a = desc->v1.energy_a;
3139 	energy_b = desc->v1.energy_b;
3140 	energy_a = energy_a ? -energy_a : -256;
3141 	energy_b = energy_b ? -energy_b : -256;
3142 	return MAX(energy_a, energy_b);
3143 }
3144 
3145 /*
3146  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3147  *
3148  * Handles the actual data of the Rx packet from the fw
3149  */
3150 static bool
3151 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3152     bool stolen)
3153 {
3154 	struct ieee80211com *ic = &sc->sc_ic;
3155 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3156 	struct ieee80211_rx_stats rxs;
3157 	struct iwm_rx_phy_info *phy_info;
3158 	struct iwm_rx_mpdu_res_start *rx_res;
3159 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3160 	uint32_t len;
3161 	uint32_t rx_pkt_status;
3162 	int rssi;
3163 
3164 	phy_info = &sc->sc_last_phy_info;
3165 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3166 	len = le16toh(rx_res->byte_count);
3167 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3168 
3169 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3170 		device_printf(sc->sc_dev,
3171 		    "dsp size out of range [0,20]: %d\n",
3172 		    phy_info->cfg_phy_cnt);
3173 		return false;
3174 	}
3175 
3176 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3177 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3178 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3179 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3180 		return false;
3181 	}
3182 
3183 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3184 
3185 	/* Map it to relative value */
3186 	rssi = rssi - sc->sc_noise;
3187 
3188 	/* replenish ring for the buffer we're going to feed to the sharks */
3189 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3190 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3191 		    __func__);
3192 		return false;
3193 	}
3194 
3195 	m->m_data = pkt->data + sizeof(*rx_res);
3196 	m->m_pkthdr.len = m->m_len = len;
3197 
3198 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3199 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3200 
3201 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3202 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3203 	    __func__,
3204 	    le16toh(phy_info->channel),
3205 	    le16toh(phy_info->phy_flags));
3206 
3207 	/*
3208 	 * Populate an RX state struct with the provided information.
3209 	 */
3210 	bzero(&rxs, sizeof(rxs));
3211 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3212 	rxs.r_flags |= IEEE80211_R_BAND;
3213 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3214 	rxs.c_ieee = le16toh(phy_info->channel);
3215 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3216 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3217 		rxs.c_band = IEEE80211_CHAN_2GHZ;
3218 	} else {
3219 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3220 		rxs.c_band = IEEE80211_CHAN_5GHZ;
3221 	}
3222 
3223 	/* rssi is in 1/2 dB units */
3224 	rxs.c_rssi = rssi * 2;
3225 	rxs.c_nf = sc->sc_noise;
3226 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3227 		return false;
3228 
3229 	if (ieee80211_radiotap_active_vap(vap)) {
3230 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3231 
3232 		tap->wr_flags = 0;
3233 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3234 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3235 		tap->wr_chan_freq = htole16(rxs.c_freq);
3236 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3237 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3238 		tap->wr_dbm_antsignal = (int8_t)rssi;
3239 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3240 		tap->wr_tsft = phy_info->system_timestamp;
3241 		switch (phy_info->rate) {
3242 		/* CCK rates. */
3243 		case  10: tap->wr_rate =   2; break;
3244 		case  20: tap->wr_rate =   4; break;
3245 		case  55: tap->wr_rate =  11; break;
3246 		case 110: tap->wr_rate =  22; break;
3247 		/* OFDM rates. */
3248 		case 0xd: tap->wr_rate =  12; break;
3249 		case 0xf: tap->wr_rate =  18; break;
3250 		case 0x5: tap->wr_rate =  24; break;
3251 		case 0x7: tap->wr_rate =  36; break;
3252 		case 0x9: tap->wr_rate =  48; break;
3253 		case 0xb: tap->wr_rate =  72; break;
3254 		case 0x1: tap->wr_rate =  96; break;
3255 		case 0x3: tap->wr_rate = 108; break;
3256 		/* Unknown rate: should not happen. */
3257 		default:  tap->wr_rate =   0;
3258 		}
3259 	}
3260 
3261 	return true;
3262 }
3263 
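/*
 * Note on the radiotap rate table above: wr_rate is expressed in
 * 500 Kb/s units, and the CCK codes (10/20/55/110) are the bitrate
 * in 100 Kb/s units, so e.g. code 110 (11 Mb/s) maps to 22 and the
 * OFDM code 0x3 (54 Mb/s) maps to 108.  The same mapping is used in
 * iwm_rx_mpdu_mq() below.
 */
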
3264 static bool
3265 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3266     bool stolen)
3267 {
3268 	struct ieee80211com *ic = &sc->sc_ic;
3269 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3270 	struct ieee80211_frame *wh;
3271 	struct ieee80211_rx_stats rxs;
3272 	struct iwm_rx_mpdu_desc *desc;
3273 	struct iwm_rx_packet *pkt;
3274 	int rssi;
3275 	uint32_t hdrlen, len, rate_n_flags;
3276 	uint16_t phy_info;
3277 	uint8_t channel;
3278 
3279 	pkt = mtodo(m, offset);
3280 	desc = (void *)pkt->data;
3281 
3282 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3283 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3284 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3285 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3286 		return false;
3287 	}
3288 
3289 	channel = desc->v1.channel;
3290 	len = le16toh(desc->mpdu_len);
3291 	phy_info = le16toh(desc->phy_info);
3292 	rate_n_flags = desc->v1.rate_n_flags;
3293 
3294 	wh = mtodo(m, sizeof(*desc));
3295 	m->m_data = pkt->data + sizeof(*desc);
3296 	m->m_pkthdr.len = m->m_len = len;
3298 
3299 	/* Account for padding following the frame header. */
3300 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3301 		hdrlen = ieee80211_anyhdrsize(wh);
3302 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3303 		m->m_data = mtodo(m, 2);
3304 		wh = mtod(m, struct ieee80211_frame *);
3305 	}
3306 
3307 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3308 	/* Map it to relative value */
3309 	rssi = rssi - sc->sc_noise;
3310 
3311 	/* replenish ring for the buffer we're going to feed to the sharks */
3312 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3313 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3314 		    __func__);
3315 		return false;
3316 	}
3317 
3318 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3319 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3320 
3321 	/*
3322 	 * Populate an RX state struct with the provided information.
3323 	 */
3324 	bzero(&rxs, sizeof(rxs));
3325 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3326 	rxs.r_flags |= IEEE80211_R_BAND;
3327 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3328 	rxs.c_ieee = channel;
3329 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3330 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3331 	rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3332 
3333 	/* net80211 expects c_rssi in 1/2 dBm units, hence the doubling. */
3334 	rxs.c_rssi = rssi * 2;
3335 	rxs.c_nf = sc->sc_noise;
3336 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3337 		return false;
3338 
3339 	if (ieee80211_radiotap_active_vap(vap)) {
3340 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3341 
3342 		tap->wr_flags = 0;
3343 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3344 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3345 		tap->wr_chan_freq = htole16(rxs.c_freq);
3346 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3347 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3348 		tap->wr_dbm_antsignal = (int8_t)rssi;
3349 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3350 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3351 		switch ((rate_n_flags & 0xff)) {
3352 		/* CCK rates. */
3353 		case  10: tap->wr_rate =   2; break;
3354 		case  20: tap->wr_rate =   4; break;
3355 		case  55: tap->wr_rate =  11; break;
3356 		case 110: tap->wr_rate =  22; break;
3357 		/* OFDM rates. */
3358 		case 0xd: tap->wr_rate =  12; break;
3359 		case 0xf: tap->wr_rate =  18; break;
3360 		case 0x5: tap->wr_rate =  24; break;
3361 		case 0x7: tap->wr_rate =  36; break;
3362 		case 0x9: tap->wr_rate =  48; break;
3363 		case 0xb: tap->wr_rate =  72; break;
3364 		case 0x1: tap->wr_rate =  96; break;
3365 		case 0x3: tap->wr_rate = 108; break;
3366 		/* Unknown rate: should not happen. */
3367 		default:  tap->wr_rate =   0;
3368 		}
3369 	}
3370 
3371 	return true;
3372 }
3373 
3374 static bool
3375 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3376     bool stolen)
3377 {
3378 	struct ieee80211com *ic;
3379 	struct ieee80211_frame *wh;
3380 	struct ieee80211_node *ni;
3381 	bool ret;
3382 
3383 	ic = &sc->sc_ic;
3384 
3385 	ret = sc->cfg->mqrx_supported ?
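	/*
	 * Devices with multi-queue RX support deliver MPDUs in a different
	 * descriptor format, so dispatch to the matching parser.
	 */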
3386 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3387 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3388 	if (!ret) {
3389 		counter_u64_add(ic->ic_ierrors, 1);
3390 		return (ret);
3391 	}
3392 
3393 	wh = mtod(m, struct ieee80211_frame *);
3394 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3395 
3396 	IWM_UNLOCK(sc);
3397 	if (ni != NULL) {
3398 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3399 		ieee80211_input_mimo(ni, m);
3400 		ieee80211_free_node(ni);
3401 	} else {
3402 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3403 		ieee80211_input_mimo_all(ic, m);
3404 	}
3405 	IWM_LOCK(sc);
3406 
3407 	return true;
3408 }
3409 
3410 static int
3411 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3412 	struct iwm_node *in)
3413 {
3414 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3415 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3416 	struct ieee80211_node *ni = &in->in_ni;
3417 	struct ieee80211vap *vap = ni->ni_vap;
3418 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3419 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3420 	boolean_t rate_matched;
3421 	uint8_t tx_resp_rate;
3422 
3423 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3424 
3425 	/* Update rate control statistics. */
3426 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3427 	    __func__,
3428 	    (int) le16toh(tx_resp->status.status),
3429 	    (int) le16toh(tx_resp->status.sequence),
3430 	    tx_resp->frame_count,
3431 	    tx_resp->bt_kill_count,
3432 	    tx_resp->failure_rts,
3433 	    tx_resp->failure_frame,
3434 	    le32toh(tx_resp->initial_rate),
3435 	    (int) le16toh(tx_resp->wireless_media_time));
3436 
3437 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3438 
3439 	/* For rate control, ignore frames sent at different initial rate */
3440 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3441 
3442 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3443 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3444 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3445 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3446 	}
3447 
3448 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3449 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3450 	txs->short_retries = tx_resp->failure_rts;
3451 	txs->long_retries = tx_resp->failure_frame;
3452 	if (status != IWM_TX_STATUS_SUCCESS &&
3453 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3454 		switch (status) {
3455 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3456 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3457 			break;
3458 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3459 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3460 			break;
3461 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3462 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3463 			break;
3464 		default:
3465 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3466 			break;
3467 		}
3468 	} else {
3469 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3470 	}
3471 
3472 	if (rate_matched) {
3473 		ieee80211_ratectl_tx_complete(ni, txs);
3474 
3475 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3476 		new_rate = vap->iv_bss->ni_txrate;
3477 		if (new_rate != 0 && new_rate != cur_rate) {
3478 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3479 			iwm_setrates(sc, in, rix);
3480 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3481 		}
3482 	}
3483 
3484 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3485 }
3486 
3487 static void
3488 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3489 {
3490 	struct iwm_cmd_header *cmd_hdr;
3491 	struct iwm_tx_ring *ring;
3492 	struct iwm_tx_data *txd;
3493 	struct iwm_node *in;
3494 	struct mbuf *m;
3495 	int idx, qid, qmsk, status;
3496 
3497 	cmd_hdr = &pkt->hdr;
3498 	idx = cmd_hdr->idx;
3499 	qid = cmd_hdr->qid;
3500 
3501 	ring = &sc->txq[qid];
3502 	txd = &ring->data[idx];
3503 	in = txd->in;
3504 	m = txd->m;
3505 
3506 	KASSERT(txd->done == 0, ("txd not done"));
3507 	KASSERT(txd->in != NULL, ("txd without node"));
3508 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3509 
3510 	sc->sc_tx_timer = 0;
3511 
3512 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3513 
3514 	/* Unmap and free mbuf. */
3515 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3516 	bus_dmamap_unload(ring->data_dmat, txd->map);
3517 
3518 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3519 	    "free txd %p, in %p\n", txd, txd->in);
3520 	txd->done = 1;
3521 	txd->m = NULL;
3522 	txd->in = NULL;
3523 
3524 	ieee80211_tx_complete(&in->in_ni, m, status);
3525 
3526 	qmsk = 1 << qid;
3527 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3528 		sc->qfullmsk &= ~qmsk;
3529 		if (sc->qfullmsk == 0)
3530 			iwm_start(sc);
3531 	}
3532 }
3533 
3534 /*
3535  * transmit side
3536  */
3537 
3538 /*
3539  * Process a "command done" firmware notification.  This is where we wake up
3540  * processes waiting for a synchronous command completion.
3541  * Adapted from if_iwn.
3542  */
3543 static void
3544 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3545 {
3546 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3547 	struct iwm_tx_data *data;
3548 
3549 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3550 		return;	/* Not a command ack. */
3551 	}
3552 
3553 	/* XXX wide commands? */
3554 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3555 	    "cmd notification type 0x%x qid %d idx %d\n",
3556 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3557 
3558 	data = &ring->data[pkt->hdr.idx];
3559 
3560 	/* If the command was mapped in an mbuf, free it. */
3561 	if (data->m != NULL) {
3562 		bus_dmamap_sync(ring->data_dmat, data->map,
3563 		    BUS_DMASYNC_POSTWRITE);
3564 		bus_dmamap_unload(ring->data_dmat, data->map);
3565 		m_freem(data->m);
3566 		data->m = NULL;
3567 	}
3568 	wakeup(&ring->desc[pkt->hdr.idx]);
3569 
3570 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
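	/*
	 * Sanity check: the index of the completing command plus the number
	 * of commands still queued should wrap around to the ring's current
	 * write position; anything else means a completion was skipped.
	 */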
3571 		device_printf(sc->sc_dev,
3572 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3573 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3574 		/* XXX call iwm_force_nmi() */
3575 	}
3576 
3577 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3578 	ring->queued--;
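	/* Once the last in-flight command completes, let the NIC sleep again. */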
3579 	if (ring->queued == 0)
3580 		iwm_pcie_clear_cmd_in_flight(sc);
3581 }
3582 
3583 #if 0
3584 /*
3585  * necessary only for block ack mode
3586  */
3587 void
3588 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3589 	uint16_t len)
3590 {
3591 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3592 	uint16_t w_val;
3593 
3594 	scd_bc_tbl = sc->sched_dma.vaddr;
3595 
3596 	len += 8; /* magic numbers came naturally from paris */
3597 	len = roundup(len, 4) / 4;
3598 
3599 	w_val = htole16(sta_id << 12 | len);
3600 
3601 	/* Update TX scheduler. */
3602 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3603 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3604 	    BUS_DMASYNC_PREWRITE);
3605 
3606 	/* The first IWM_TFD_QUEUE_SIZE_BC_DUP entries appear to be mirrored
3606 	 * past the end of the table, presumably so the scheduler still reads
3606 	 * valid byte counts across the ring wrap. */
3607 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3608 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3609 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3610 		    BUS_DMASYNC_PREWRITE);
3611 	}
3612 }
3613 #endif
3614 
3615 static int
3616 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3617 {
3618 	int i;
3619 
3620 	for (i = 0; i < nitems(iwm_rates); i++) {
3621 		if (iwm_rates[i].rate == rate)
3622 			return (i);
3623 	}
3624 	/* XXX error? */
3625 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3626 	    "%s: couldn't find an entry for rate=%d\n",
3627 	    __func__,
3628 	    rate);
3629 	return (0);
3630 }
3631 
3632 /*
3633  * Fill in the rate related information for a transmit command.
3634  */
3635 static const struct iwm_rate *
3636 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3637 	struct mbuf *m, struct iwm_tx_cmd *tx)
3638 {
3639 	struct ieee80211_node *ni = &in->in_ni;
3640 	struct ieee80211_frame *wh;
3641 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3642 	const struct iwm_rate *rinfo;
3643 	int type;
3644 	int ridx, rate_flags;
3645 
3646 	wh = mtod(m, struct ieee80211_frame *);
3647 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3648 
3649 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3650 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3651 
3652 	if (type == IEEE80211_FC0_TYPE_MGT ||
3653 	    type == IEEE80211_FC0_TYPE_CTL ||
3654 	    (m->m_flags & M_EAPOL) != 0) {
3655 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3656 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3657 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3658 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3659 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3660 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3661 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3662 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3663 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3664 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3665 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3666 	} else {
3667 		/* for data frames, use RS table */
3668 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3669 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3670 		if (ridx == -1)
3671 			ridx = 0;
3672 
3673 		/* This is the index into the programmed table */
3674 		tx->initial_rate_index = 0;
3675 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3676 	}
3677 
3678 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3679 	    "%s: frame type=%d txrate %d\n",
3680 	        __func__, type, iwm_rates[ridx].rate);
3681 
3682 	rinfo = &iwm_rates[ridx];
3683 
3684 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3685 	    __func__, ridx,
3686 	    rinfo->rate,
3687 	    !! (IWM_RIDX_IS_CCK(ridx))
3688 	    );
3689 
3690 	/* XXX TODO: hard-coded TX antenna? */
3691 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3692 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3693 	else
3694 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3695 	if (IWM_RIDX_IS_CCK(ridx))
3696 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
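	/*
	 * rate_n_flags packs the PLCP code in the low byte together with the
	 * antenna-select and modulation flag bits, e.g. a CCK rate on antenna
	 * A becomes IWM_RATE_MCS_ANT_A_MSK | IWM_RATE_MCS_CCK_MSK | plcp.
	 */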
3697 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3698 
3699 	return rinfo;
3700 }
3701 
3702 #define TB0_SIZE 16
3703 static int
3704 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3705 {
3706 	struct ieee80211com *ic = &sc->sc_ic;
3707 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3708 	struct iwm_node *in = IWM_NODE(ni);
3709 	struct iwm_tx_ring *ring;
3710 	struct iwm_tx_data *data;
3711 	struct iwm_tfd *desc;
3712 	struct iwm_device_cmd *cmd;
3713 	struct iwm_tx_cmd *tx;
3714 	struct ieee80211_frame *wh;
3715 	struct ieee80211_key *k = NULL;
3716 	struct mbuf *m1;
3717 	const struct iwm_rate *rinfo;
3718 	uint32_t flags;
3719 	u_int hdrlen;
3720 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3721 	int nsegs;
3722 	uint8_t tid, type;
3723 	int i, totlen, error, pad;
3724 
3725 	wh = mtod(m, struct ieee80211_frame *);
3726 	hdrlen = ieee80211_anyhdrsize(wh);
3727 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3728 	tid = 0;
3729 	ring = &sc->txq[ac];
3730 	desc = &ring->desc[ring->cur];
3731 	data = &ring->data[ring->cur];
3732 
3733 	/* Fill out iwm_tx_cmd to send to the firmware */
3734 	cmd = &ring->cmd[ring->cur];
3735 	cmd->hdr.code = IWM_TX_CMD;
3736 	cmd->hdr.flags = 0;
3737 	cmd->hdr.qid = ring->qid;
3738 	cmd->hdr.idx = ring->cur;
3739 
3740 	tx = (void *)cmd->data;
3741 	memset(tx, 0, sizeof(*tx));
3742 
3743 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3744 
3745 	/* Encrypt the frame if need be. */
3746 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3747 		/* Retrieve key for TX && do software encryption. */
3748 		k = ieee80211_crypto_encap(ni, m);
3749 		if (k == NULL) {
3750 			m_freem(m);
3751 			return (ENOBUFS);
3752 		}
3753 		/* 802.11 header may have moved. */
3754 		wh = mtod(m, struct ieee80211_frame *);
3755 	}
3756 
3757 	if (ieee80211_radiotap_active_vap(vap)) {
3758 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3759 
3760 		tap->wt_flags = 0;
3761 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3762 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3763 		tap->wt_rate = rinfo->rate;
3764 		if (k != NULL)
3765 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3766 		ieee80211_radiotap_tx(vap, m);
3767 	}
3768 
3769 	flags = 0;
3770 	totlen = m->m_pkthdr.len;
3771 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3772 		flags |= IWM_TX_CMD_FLG_ACK;
3773 	}
3774 
3775 	if (type == IEEE80211_FC0_TYPE_DATA &&
3776 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3777 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3778 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3779 	}
3780 
3781 	tx->sta_id = IWM_STATION_ID;
3782 
3783 	if (type == IEEE80211_FC0_TYPE_MGT) {
3784 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3785 
3786 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3787 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3788 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3789 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3790 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3791 		} else {
3792 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3793 		}
3794 	} else {
3795 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3796 	}
3797 
3798 	if (hdrlen & 3) {
3799 		/* First segment length must be a multiple of 4. */
3800 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3801 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3802 		pad = 4 - (hdrlen & 3);
3803 	} else {
3804 		tx->offload_assist = 0;
3805 		pad = 0;
3806 	}
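	/*
	 * E.g. a 26-byte QoS data header gives pad = 2, keeping the first
	 * segment length a multiple of 4 as required above.
	 */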
3807 
3808 	tx->len = htole16(totlen);
3809 	tx->tid_tspec = tid;
3810 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3811 
3812 	/* Set physical address of "scratch area". */
3813 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3814 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3815 
3816 	/* Copy 802.11 header in TX command. */
3817 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3818 
3819 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3820 
3821 	tx->sec_ctl = 0;
3822 	tx->tx_flags |= htole32(flags);
3823 
3824 	/* Trim 802.11 header. */
3825 	m_adj(m, hdrlen);
3826 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3827 	    segs, &nsegs, BUS_DMA_NOWAIT);
3828 	if (error != 0) {
3829 		if (error != EFBIG) {
3830 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3831 			    error);
3832 			m_freem(m);
3833 			return error;
3834 		}
3835 		/* Too many DMA segments, linearize mbuf. */
3836 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3837 		if (m1 == NULL) {
3838 			device_printf(sc->sc_dev,
3839 			    "%s: could not defrag mbuf\n", __func__);
3840 			m_freem(m);
3841 			return (ENOBUFS);
3842 		}
3843 		m = m1;
3844 
3845 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3846 		    segs, &nsegs, BUS_DMA_NOWAIT);
3847 		if (error != 0) {
3848 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3849 			    error);
3850 			m_freem(m);
3851 			return error;
3852 		}
3853 	}
3854 	data->m = m;
3855 	data->in = in;
3856 	data->done = 0;
3857 
3858 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3859 	    "sending txd %p, in %p\n", data, data->in);
3860 	KASSERT(data->in != NULL, ("node is NULL"));
3861 
3862 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3863 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3864 	    ring->qid, ring->cur, totlen, nsegs,
3865 	    le32toh(tx->tx_flags),
3866 	    le32toh(tx->rate_n_flags),
3867 	    tx->initial_rate_index
3868 	    );
3869 
3870 	/* Fill TX descriptor. */
3871 	memset(desc, 0, sizeof(*desc));
3872 	desc->num_tbs = 2 + nsegs;
3873 
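	/*
	 * TB0 covers the first 16 bytes of the TX command, TB1 the remainder
	 * of the command plus the (padded) 802.11 header; the mbuf's data
	 * segments follow as TBs 2..num_tbs-1.
	 */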
3874 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3875 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3876 	    (TB0_SIZE << 4));
3877 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3878 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3879 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3880 	    hdrlen + pad - TB0_SIZE) << 4));
3881 
3882 	/* Other DMA segments are for data payload. */
3883 	for (i = 0; i < nsegs; i++) {
3884 		seg = &segs[i];
3885 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3886 		desc->tbs[i + 2].hi_n_len =
3887 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
3888 		    (seg->ds_len << 4));
3889 	}
3890 
3891 	bus_dmamap_sync(ring->data_dmat, data->map,
3892 	    BUS_DMASYNC_PREWRITE);
3893 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3894 	    BUS_DMASYNC_PREWRITE);
3895 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3896 	    BUS_DMASYNC_PREWRITE);
3897 
3898 #if 0
3899 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3900 #endif
3901 
3902 	/* Kick TX ring. */
3903 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3904 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3905 
3906 	/* Mark TX ring as full if we reach a certain threshold. */
3907 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3908 		sc->qfullmsk |= 1 << ring->qid;
3909 	}
3910 
3911 	return 0;
3912 }
3913 
3914 static int
3915 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3916     const struct ieee80211_bpf_params *params)
3917 {
3918 	struct ieee80211com *ic = ni->ni_ic;
3919 	struct iwm_softc *sc = ic->ic_softc;
3920 	int error = 0;
3921 
3922 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3923 	    "->%s begin\n", __func__);
3924 
3925 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3926 		m_freem(m);
3927 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3928 		    "<-%s not RUNNING\n", __func__);
3929 		return (ENETDOWN);
3930 	}
3931 
3932 	IWM_LOCK(sc);
3933 	/* XXX fix this */
3934 	if (params == NULL) {
3935 		error = iwm_tx(sc, m, ni, 0);
3936 	} else {
3937 		error = iwm_tx(sc, m, ni, 0);
3938 	}
3939 	if (sc->sc_tx_timer == 0)
3940 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3941 	sc->sc_tx_timer = 5;
3942 	IWM_UNLOCK(sc);
3943 
3944 	return (error);
3945 }
3946 
3947 /*
3948  * mvm/tx.c
3949  */
3950 
3951 /*
3952  * Note that there are transports that buffer frames before they reach
3953  * the firmware. This means that after flush_tx_path is called, the
3954  * queue might not be empty. The race-free way to handle this is to:
3955  * 1) set the station as draining
3956  * 2) flush the Tx path
3957  * 3) wait for the transport queues to be empty
3958  */
3959 int
3960 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3961 {
3962 	int ret;
3963 	struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
3964 		.queues_ctl = htole32(tfd_msk),
3965 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3966 	};
3967 
3968 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3969 	    sizeof(flush_cmd), &flush_cmd);
3970 	if (ret)
3971 		device_printf(sc->sc_dev,
3972 		    "Flushing tx queue failed: %d\n", ret);
3973 	return ret;
3974 }
3975 
3976 /*
3977  * BEGIN mvm/quota.c
3978  */
3979 
3980 static int
3981 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3982 {
3983 	struct iwm_time_quota_cmd_v1 cmd;
3984 	int i, idx, ret, num_active_macs, quota, quota_rem;
3985 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3986 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3987 	uint16_t id;
3988 
3989 	memset(&cmd, 0, sizeof(cmd));
3990 
3991 	/* currently, PHY ID == binding ID */
3992 	if (ivp) {
3993 		id = ivp->phy_ctxt->id;
3994 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3995 		colors[id] = ivp->phy_ctxt->color;
3996 
3997 		n_ifs[id] = 1;
3999 	}
4000 
4001 	/*
4002 	 * The FW's scheduling session consists of
4003 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4004 	 * equally between all the bindings that require quota
4005 	 */
4006 	num_active_macs = 0;
4007 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4008 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4009 		num_active_macs += n_ifs[i];
4010 	}
4011 
4012 	quota = 0;
4013 	quota_rem = 0;
4014 	if (num_active_macs) {
4015 		quota = IWM_MAX_QUOTA / num_active_macs;
4016 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4017 	}
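	/*
	 * E.g. a single active binding receives all IWM_MAX_QUOTA fragments;
	 * with two bindings each would get half, and the division remainder
	 * is handed to the first binding below.
	 */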
4018 
4019 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4020 		if (colors[i] < 0)
4021 			continue;
4022 
4023 		cmd.quotas[idx].id_and_color =
4024 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4025 
4026 		if (n_ifs[i] <= 0) {
4027 			cmd.quotas[idx].quota = htole32(0);
4028 			cmd.quotas[idx].max_duration = htole32(0);
4029 		} else {
4030 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4031 			cmd.quotas[idx].max_duration = htole32(0);
4032 		}
4033 		idx++;
4034 	}
4035 
4036 	/* Give the remainder of the session to the first binding */
4037 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4038 
4039 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4040 	    sizeof(cmd), &cmd);
4041 	if (ret)
4042 		device_printf(sc->sc_dev,
4043 		    "%s: Failed to send quota: %d\n", __func__, ret);
4044 	return ret;
4045 }
4046 
4047 /*
4048  * END mvm/quota.c
4049  */
4050 
4051 /*
4052  * ieee80211 routines
4053  */
4054 
4055 /*
4056  * Change to AUTH state in 80211 state machine.  Roughly matches what
4057  * Linux does in bss_info_changed().
4058  */
4059 static int
4060 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4061 {
4062 	struct ieee80211_node *ni;
4063 	struct iwm_node *in;
4064 	struct iwm_vap *iv = IWM_VAP(vap);
4065 	uint32_t duration;
4066 	int error;
4067 
4068 	/*
4069 	 * XXX i have a feeling that the vap node is being
4070 	 * freed from underneath us. Grr.
4071 	 */
4072 	ni = ieee80211_ref_node(vap->iv_bss);
4073 	in = IWM_NODE(ni);
4074 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4075 	    "%s: called; vap=%p, bss ni=%p\n",
4076 	    __func__,
4077 	    vap,
4078 	    ni);
4079 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4080 	    __func__, ether_sprintf(ni->ni_bssid));
4081 
4082 	in->in_assoc = 0;
4083 	iv->iv_auth = 1;
4084 
4085 	/*
4086 	 * Firmware bug - it'll crash if the beacon interval is less
4087 	 * than 16. We can't avoid connecting at all, so refuse the
4088 	 * station state change, this will cause net80211 to abandon
4089 	 * attempts to connect to this AP, and eventually wpa_s will
4090 	 * blacklist the AP...
4091 	 */
4092 	if (ni->ni_intval < 16) {
4093 		device_printf(sc->sc_dev,
4094 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4095 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4096 		error = EINVAL;
4097 		goto out;
4098 	}
4099 
4100 	error = iwm_allow_mcast(vap, sc);
4101 	if (error) {
4102 		device_printf(sc->sc_dev,
4103 		    "%s: failed to set multicast\n", __func__);
4104 		goto out;
4105 	}
4106 
4107 	/*
4108 	 * This is where it deviates from what Linux does.
4109 	 *
4110 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4111 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4112 	 * and always does a mac_ctx_changed().
4113 	 *
4114 	 * The openbsd port doesn't attempt to do that - it reset things
4115 	 * at odd states and does the add here.
4116 	 *
4117 	 * So, until the state handling is fixed (ie, we never reset
4118 	 * the NIC except for a firmware failure, which should drag
4119 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4120 	 * contexts that are required), let's do a dirty hack here.
4121 	 */
4122 	if (iv->is_uploaded) {
4123 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4124 			device_printf(sc->sc_dev,
4125 			    "%s: failed to update MAC\n", __func__);
4126 			goto out;
4127 		}
4128 	} else {
4129 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4130 			device_printf(sc->sc_dev,
4131 			    "%s: failed to add MAC\n", __func__);
4132 			goto out;
4133 		}
4134 	}
4135 	sc->sc_firmware_state = 1;
4136 
4137 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4138 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4139 		device_printf(sc->sc_dev,
4140 		    "%s: failed update phy ctxt\n", __func__);
4141 		goto out;
4142 	}
4143 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4144 
4145 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4146 		device_printf(sc->sc_dev,
4147 		    "%s: binding update cmd\n", __func__);
4148 		goto out;
4149 	}
4150 	sc->sc_firmware_state = 2;
4151 	/*
4152 	 * Authentication becomes unreliable when powersaving is left enabled
4153 	 * here. Powersaving will be activated again when association has
4154 	 * finished or is aborted.
4155 	 */
4156 	iv->ps_disabled = TRUE;
4157 	error = iwm_power_update_mac(sc);
4158 	iv->ps_disabled = FALSE;
4159 	if (error != 0) {
4160 		device_printf(sc->sc_dev,
4161 		    "%s: failed to update power management\n",
4162 		    __func__);
4163 		goto out;
4164 	}
4165 	if ((error = iwm_add_sta(sc, in)) != 0) {
4166 		device_printf(sc->sc_dev,
4167 		    "%s: failed to add sta\n", __func__);
4168 		goto out;
4169 	}
4170 	sc->sc_firmware_state = 3;
4171 
4172 	/*
4173 	 * Prevent the FW from wandering off channel during association
4174 	 * by "protecting" the session with a time event.
4175 	 */
4176 	/* XXX duration is in units of TU, not MS */
4177 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4178 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4179 
4180 	error = 0;
4181 out:
4182 	if (error != 0)
4183 		iv->iv_auth = 0;
4184 	ieee80211_free_node(ni);
4185 	return (error);
4186 }
4187 
4188 static struct ieee80211_node *
4189 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4190 {
4191 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4192 	    M_NOWAIT | M_ZERO);
4193 }
4194 
4195 static uint8_t
4196 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4197 {
4198 	uint8_t plcp = rate_n_flags & 0xff;
4199 	int i;
4200 
4201 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4202 		if (iwm_rates[i].plcp == plcp)
4203 			return iwm_rates[i].rate;
4204 	}
4205 	return 0;
4206 }
4207 
4208 uint8_t
4209 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4210 {
4211 	int i;
4212 	uint8_t rval;
4213 
4214 	for (i = 0; i < rs->rs_nrates; i++) {
4215 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4216 		if (rval == iwm_rates[ridx].rate)
4217 			return rs->rs_rates[i];
4218 	}
4219 
4220 	return 0;
4221 }
4222 
4223 static int
4224 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4225 {
4226 	int i;
4227 
4228 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4229 		if (iwm_rates[i].rate == rate)
4230 			return i;
4231 	}
4232 
4233 	device_printf(sc->sc_dev,
4234 	    "%s: WARNING: device rate for %u not found!\n",
4235 	    __func__, rate);
4236 
4237 	return -1;
4238 }
4239 
4240 
4241 static void
4242 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4243 {
4244 	struct ieee80211_node *ni = &in->in_ni;
4245 	struct iwm_lq_cmd *lq = &in->in_lq;
4246 	struct ieee80211_rateset *rs = &ni->ni_rates;
4247 	int nrates = rs->rs_nrates;
4248 	int i, ridx, tab = 0;
4249 //	int txant = 0;
4250 
4251 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4252 
4253 	if (nrates > nitems(lq->rs_table)) {
4254 		device_printf(sc->sc_dev,
4255 		    "%s: node supports %d rates, driver handles "
4256 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4257 		return;
4258 	}
4259 	if (nrates == 0) {
4260 		device_printf(sc->sc_dev,
4261 		    "%s: node supports 0 rates, odd!\n", __func__);
4262 		return;
4263 	}
4264 	nrates = imin(rix + 1, nrates);
4265 
4266 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4267 	    "%s: nrates=%d\n", __func__, nrates);
4268 
4269 	/* then construct a lq_cmd based on those */
4270 	memset(lq, 0, sizeof(*lq));
4271 	lq->sta_id = IWM_STATION_ID;
4272 
4273 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4274 	if (ni->ni_flags & IEEE80211_NODE_HT)
4275 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4276 
4277 	/*
4278 	 * are these used? (we don't do SISO or MIMO)
4279 	 * need to set them to non-zero, though, or we get an error.
4280 	 */
4281 	lq->single_stream_ant_msk = 1;
4282 	lq->dual_stream_ant_msk = 1;
4283 
4284 	/*
4285 	 * Build the actual rate selection table.
4286 	 * The lowest bits are the rates.  Additionally,
4287 	 * CCK needs bit 9 to be set.  The rest of the bits
4288 	 * we add to the table select the tx antenna.
4289 	 * Note that we add the rates in the highest rate first
4290 	 * (opposite of ni_rates).
4291 	 */
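	/*
	 * E.g. for an 11g node with rix pointing at 24 Mb/s this yields
	 * rs_table = { 24, 18, 12, 11, ... } (each encoded as PLCP plus
	 * antenna/CCK bits), padded out with the lowest rate below.
	 */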
4292 	for (i = 0; i < nrates; i++) {
4293 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4294 		int nextant;
4295 
4296 		/* Map 802.11 rate to HW rate index. */
4297 		ridx = iwm_rate2ridx(sc, rate);
4298 		if (ridx == -1)
4299 			continue;
4300 
4301 #if 0
4302 		if (txant == 0)
4303 			txant = iwm_get_valid_tx_ant(sc);
4304 		nextant = 1<<(ffs(txant)-1);
4305 		txant &= ~nextant;
4306 #else
4307 		nextant = iwm_get_valid_tx_ant(sc);
4308 #endif
4309 		tab = iwm_rates[ridx].plcp;
4310 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4311 		if (IWM_RIDX_IS_CCK(ridx))
4312 			tab |= IWM_RATE_MCS_CCK_MSK;
4313 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4314 		    "station rate i=%d, rate=%d, hw=%x\n",
4315 		    i, iwm_rates[ridx].rate, tab);
4316 		lq->rs_table[i] = htole32(tab);
4317 	}
4318 	/* then fill the rest with the lowest possible rate */
4319 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4320 		KASSERT(tab != 0, ("invalid tab"));
4321 		lq->rs_table[i] = htole32(tab);
4322 	}
4323 }
4324 
4325 static void
4326 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4327 {
4328 	struct iwm_vap *ivp = IWM_VAP(vap);
4329 	int error;
4330 
4331 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4332 	sc->sc_tx_timer = 0;
4333 
4334 	ivp->iv_auth = 0;
4335 	if (sc->sc_firmware_state == 3) {
4336 		iwm_xmit_queue_drain(sc);
4337 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4338 		error = iwm_rm_sta(sc, vap, TRUE);
4339 		if (error) {
4340 			device_printf(sc->sc_dev,
4341 			    "%s: Failed to remove station: %d\n",
4342 			    __func__, error);
4343 		}
4344 	}
4345 	if (sc->sc_firmware_state == 3) {
4346 		error = iwm_mac_ctxt_changed(sc, vap);
4347 		if (error) {
4348 			device_printf(sc->sc_dev,
4349 			    "%s: Failed to change mac context: %d\n",
4350 			    __func__, error);
4351 		}
4352 	}
4353 	if (sc->sc_firmware_state == 3) {
4354 		error = iwm_sf_update(sc, vap, FALSE);
4355 		if (error) {
4356 			device_printf(sc->sc_dev,
4357 			    "%s: Failed to update smart FIFO: %d\n",
4358 			    __func__, error);
4359 		}
4360 	}
4361 	if (sc->sc_firmware_state == 3) {
4362 		error = iwm_rm_sta_id(sc, vap);
4363 		if (error) {
4364 			device_printf(sc->sc_dev,
4365 			    "%s: Failed to remove station id: %d\n",
4366 			    __func__, error);
4367 		}
4368 	}
4369 	if (sc->sc_firmware_state == 3) {
4370 		error = iwm_update_quotas(sc, NULL);
4371 		if (error) {
4372 			device_printf(sc->sc_dev,
4373 			    "%s: Failed to update PHY quota: %d\n",
4374 			    __func__, error);
4375 		}
4376 	}
4377 	if (sc->sc_firmware_state == 3) {
4378 		/* XXX Might need to specify bssid correctly. */
4379 		error = iwm_mac_ctxt_changed(sc, vap);
4380 		if (error) {
4381 			device_printf(sc->sc_dev,
4382 			    "%s: Failed to change mac context: %d\n",
4383 			    __func__, error);
4384 		}
4385 	}
4386 	if (sc->sc_firmware_state == 3) {
4387 		sc->sc_firmware_state = 2;
4388 	}
4389 	if (sc->sc_firmware_state > 1) {
4390 		error = iwm_binding_remove_vif(sc, ivp);
4391 		if (error) {
4392 			device_printf(sc->sc_dev,
4393 			    "%s: Failed to remove channel ctx: %d\n",
4394 			    __func__, error);
4395 		}
4396 	}
4397 	if (sc->sc_firmware_state > 1) {
4398 		sc->sc_firmware_state = 1;
4399 	}
4400 	ivp->phy_ctxt = NULL;
4401 	if (sc->sc_firmware_state > 0) {
4402 		error = iwm_mac_ctxt_changed(sc, vap);
4403 		if (error) {
4404 			device_printf(sc->sc_dev,
4405 			    "%s: Failed to change mac context: %d\n",
4406 			    __func__, error);
4407 		}
4408 	}
4409 	if (sc->sc_firmware_state > 0) {
4410 		error = iwm_power_update_mac(sc);
4411 		if (error != 0) {
4412 			device_printf(sc->sc_dev,
4413 			    "%s: failed to update power management\n",
4414 			    __func__);
4415 		}
4416 	}
4417 	sc->sc_firmware_state = 0;
4418 }
4419 
4420 static int
4421 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4422 {
4423 	struct iwm_vap *ivp = IWM_VAP(vap);
4424 	struct ieee80211com *ic = vap->iv_ic;
4425 	struct iwm_softc *sc = ic->ic_softc;
4426 	struct iwm_node *in;
4427 	int error;
4428 
4429 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4430 	    "switching state %s -> %s arg=0x%x\n",
4431 	    ieee80211_state_name[vap->iv_state],
4432 	    ieee80211_state_name[nstate],
4433 	    arg);
4434 
4435 	IEEE80211_UNLOCK(ic);
4436 	IWM_LOCK(sc);
4437 
4438 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4439 	    (nstate == IEEE80211_S_AUTH ||
4440 	     nstate == IEEE80211_S_ASSOC ||
4441 	     nstate == IEEE80211_S_RUN)) {
4442 		/* Stop blinking for a scan, when authenticating. */
4443 		iwm_led_blink_stop(sc);
4444 	}
4445 
4446 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4447 		iwm_led_disable(sc);
4448 		/* disable beacon filtering if we're hopping out of RUN */
4449 		iwm_disable_beacon_filter(sc);
4450 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4451 			in->in_assoc = 0;
4452 	}
4453 
4454 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4455 	     vap->iv_state == IEEE80211_S_ASSOC ||
4456 	     vap->iv_state == IEEE80211_S_RUN) &&
4457 	    (nstate == IEEE80211_S_INIT ||
4458 	     nstate == IEEE80211_S_SCAN ||
4459 	     nstate == IEEE80211_S_AUTH)) {
4460 		iwm_stop_session_protection(sc, ivp);
4461 	}
4462 
4463 	if ((vap->iv_state == IEEE80211_S_RUN ||
4464 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4465 	    nstate == IEEE80211_S_INIT) {
4466 		/*
4467 		 * In this case, iv_newstate() wants to send an 80211 frame on
4468 		 * the network that we are leaving. So we need to call it,
4469 		 * before tearing down all the firmware state.
4470 		 */
4471 		IWM_UNLOCK(sc);
4472 		IEEE80211_LOCK(ic);
4473 		ivp->iv_newstate(vap, nstate, arg);
4474 		IEEE80211_UNLOCK(ic);
4475 		IWM_LOCK(sc);
4476 		iwm_bring_down_firmware(sc, vap);
4477 		IWM_UNLOCK(sc);
4478 		IEEE80211_LOCK(ic);
4479 		return 0;
4480 	}
4481 
4482 	switch (nstate) {
4483 	case IEEE80211_S_INIT:
4484 	case IEEE80211_S_SCAN:
4485 		break;
4486 
4487 	case IEEE80211_S_AUTH:
4488 		iwm_bring_down_firmware(sc, vap);
4489 		if ((error = iwm_auth(vap, sc)) != 0) {
4490 			device_printf(sc->sc_dev,
4491 			    "%s: could not move to auth state: %d\n",
4492 			    __func__, error);
4493 			iwm_bring_down_firmware(sc, vap);
4494 			IWM_UNLOCK(sc);
4495 			IEEE80211_LOCK(ic);
4496 			return 1;
4497 		}
4498 		break;
4499 
4500 	case IEEE80211_S_ASSOC:
4501 		/*
4502 		 * EBS may be disabled due to previous failures reported by FW.
4503 		 * Reset EBS status here assuming environment has been changed.
4504 		 */
4505 		sc->last_ebs_successful = TRUE;
4506 		break;
4507 
4508 	case IEEE80211_S_RUN:
4509 		in = IWM_NODE(vap->iv_bss);
4510 		/* Update the association state, now we have it all */
4511 		/* (e.g. associd comes in at this point). */
4512 		error = iwm_update_sta(sc, in);
4513 		if (error != 0) {
4514 			device_printf(sc->sc_dev,
4515 			    "%s: failed to update STA\n", __func__);
4516 			IWM_UNLOCK(sc);
4517 			IEEE80211_LOCK(ic);
4518 			return error;
4519 		}
4520 		in->in_assoc = 1;
4521 		error = iwm_mac_ctxt_changed(sc, vap);
4522 		if (error != 0) {
4523 			device_printf(sc->sc_dev,
4524 			    "%s: failed to update MAC: %d\n", __func__, error);
4525 		}
4526 
4527 		iwm_sf_update(sc, vap, FALSE);
4528 		iwm_enable_beacon_filter(sc, ivp);
4529 		iwm_power_update_mac(sc);
4530 		iwm_update_quotas(sc, ivp);
4531 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4532 		iwm_setrates(sc, in, rix);
4533 
4534 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4535 			device_printf(sc->sc_dev,
4536 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4537 		}
4538 
4539 		iwm_led_enable(sc);
4540 		break;
4541 
4542 	default:
4543 		break;
4544 	}
4545 	IWM_UNLOCK(sc);
4546 	IEEE80211_LOCK(ic);
4547 
4548 	return (ivp->iv_newstate(vap, nstate, arg));
4549 }
4550 
4551 void
4552 iwm_endscan_cb(void *arg, int pending)
4553 {
4554 	struct iwm_softc *sc = arg;
4555 	struct ieee80211com *ic = &sc->sc_ic;
4556 
4557 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4558 	    "%s: scan ended\n",
4559 	    __func__);
4560 
4561 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4562 }
4563 
4564 static int
4565 iwm_send_bt_init_conf(struct iwm_softc *sc)
4566 {
4567 	struct iwm_bt_coex_cmd bt_cmd;
4568 
4569 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4570 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4571 
4572 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4573 	    &bt_cmd);
4574 }
4575 
4576 static boolean_t
4577 iwm_is_lar_supported(struct iwm_softc *sc)
4578 {
4579 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4580 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4581 
4582 	if (iwm_lar_disable)
4583 		return FALSE;
4584 
4585 	/*
4586 	 * Enable LAR only if it is supported by the FW (TLV) &&
4587 	 * enabled in the NVM
4588 	 */
4589 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4590 		return nvm_lar && tlv_lar;
4591 	else
4592 		return tlv_lar;
4593 }
4594 
4595 static boolean_t
4596 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4597 {
4598 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4599 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4600 }
4601 
4602 static int
4603 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4604 {
4605 	struct iwm_mcc_update_cmd mcc_cmd;
4606 	struct iwm_host_cmd hcmd = {
4607 		.id = IWM_MCC_UPDATE_CMD,
4608 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4609 		.data = { &mcc_cmd },
4610 	};
4611 	int ret;
4612 #ifdef IWM_DEBUG
4613 	struct iwm_rx_packet *pkt;
4614 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4615 	struct iwm_mcc_update_resp_v2 *mcc_resp;
4616 	int n_channels;
4617 	uint16_t mcc;
4618 #endif
4619 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4620 
4621 	if (!iwm_is_lar_supported(sc)) {
4622 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4623 		    __func__);
4624 		return 0;
4625 	}
4626 
4627 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4628 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
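	/*
	 * The MCC is the two ASCII country-code characters packed
	 * big-endian, e.g. "ZZ" -> 0x5a5a.
	 */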
4629 	if (iwm_is_wifi_mcc_supported(sc))
4630 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4631 	else
4632 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4633 
4634 	if (resp_v2)
4635 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4636 	else
4637 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4638 
4639 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4640 	    "send MCC update to FW with '%c%c' src = %d\n",
4641 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4642 
4643 	ret = iwm_send_cmd(sc, &hcmd);
4644 	if (ret)
4645 		return ret;
4646 
4647 #ifdef IWM_DEBUG
4648 	pkt = hcmd.resp_pkt;
4649 
4650 	/* Extract MCC response */
4651 	if (resp_v2) {
4652 		mcc_resp = (void *)pkt->data;
4653 		mcc = mcc_resp->mcc;
4654 		n_channels =  le32toh(mcc_resp->n_channels);
4655 	} else {
4656 		mcc_resp_v1 = (void *)pkt->data;
4657 		mcc = mcc_resp_v1->mcc;
4658 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4659 	}
4660 
4661 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4662 	if (mcc == 0)
4663 		mcc = 0x3030;  /* "00" - world */
4664 
4665 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4666 	    "regulatory domain '%c%c' (%d channels available)\n",
4667 	    mcc >> 8, mcc & 0xff, n_channels);
4668 #endif
4669 	iwm_free_resp(sc, &hcmd);
4670 
4671 	return 0;
4672 }
4673 
4674 static void
4675 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4676 {
4677 	struct iwm_host_cmd cmd = {
4678 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4679 		.len = { sizeof(uint32_t), },
4680 		.data = { &backoff, },
4681 	};
4682 
4683 	if (iwm_send_cmd(sc, &cmd) != 0) {
4684 		device_printf(sc->sc_dev,
4685 		    "failed to change thermal tx backoff\n");
4686 	}
4687 }
4688 
4689 static int
4690 iwm_init_hw(struct iwm_softc *sc)
4691 {
4692 	struct ieee80211com *ic = &sc->sc_ic;
4693 	int error, i, ac;
4694 
4695 	sc->sf_state = IWM_SF_UNINIT;
4696 
4697 	if ((error = iwm_start_hw(sc)) != 0) {
4698 		printf("iwm_start_hw: failed %d\n", error);
4699 		return error;
4700 	}
4701 
4702 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4703 		printf("iwm_run_init_ucode: failed %d\n", error);
4704 		return error;
4705 	}
4706 
4707 	/*
4708 	 * should stop and start HW since that INIT
4709 	 * image just loaded
4710 	 */
4711 	iwm_stop_device(sc);
4712 	sc->sc_ps_disabled = FALSE;
4713 	if ((error = iwm_start_hw(sc)) != 0) {
4714 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4715 		return error;
4716 	}
4717 
4718 	/* restart, this time with the regular firmware */
4719 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4720 	if (error) {
4721 		device_printf(sc->sc_dev, "could not load firmware\n");
4722 		goto error;
4723 	}
4724 
4725 	error = iwm_sf_update(sc, NULL, FALSE);
4726 	if (error)
4727 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4728 
4729 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4730 		device_printf(sc->sc_dev, "bt init conf failed\n");
4731 		goto error;
4732 	}
4733 
4734 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4735 	if (error != 0) {
4736 		device_printf(sc->sc_dev, "antenna config failed\n");
4737 		goto error;
4738 	}
4739 
4740 	/* Send phy db control command and then phy db calibration */
4741 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4742 		goto error;
4743 
4744 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4745 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4746 		goto error;
4747 	}
4748 
4749 	/* Add auxiliary station for scanning */
4750 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4751 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4752 		goto error;
4753 	}
4754 
4755 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4756 		/*
4757 		 * The channel used here isn't relevant as it's
4758 		 * going to be overwritten in the other flows.
4759 		 * For now use the first channel we have.
4760 		 */
4761 		if ((error = iwm_phy_ctxt_add(sc,
4762 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4763 			goto error;
4764 	}
4765 
4766 	/* Initialize tx backoffs to the minimum. */
4767 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4768 		iwm_tt_tx_backoff(sc, 0);
4769 
4770 	if (iwm_config_ltr(sc) != 0)
4771 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4772 
4773 	error = iwm_power_update_device(sc);
4774 	if (error)
4775 		goto error;
4776 
4777 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4778 		goto error;
4779 
4780 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4781 		if ((error = iwm_config_umac_scan(sc)) != 0)
4782 			goto error;
4783 	}
4784 
4785 	/* Enable Tx queues. */
4786 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4787 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4788 		    iwm_ac_to_tx_fifo[ac]);
4789 		if (error)
4790 			goto error;
4791 	}
4792 
4793 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4794 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4795 		goto error;
4796 	}
4797 
4798 	return 0;
4799 
4800  error:
4801 	iwm_stop_device(sc);
4802 	return error;
4803 }
4804 
4805 /* Allow multicast from our BSSID. */
4806 static int
4807 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4808 {
4809 	struct ieee80211_node *ni = vap->iv_bss;
4810 	struct iwm_mcast_filter_cmd *cmd;
4811 	size_t size;
4812 	int error;
4813 
4814 	size = roundup(sizeof(*cmd), 4);
4815 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4816 	if (cmd == NULL)
4817 		return ENOMEM;
4818 	cmd->filter_own = 1;
4819 	cmd->port_id = 0;
4820 	cmd->count = 0;
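	/* An empty address list with pass_all set accepts all multicast. */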
4821 	cmd->pass_all = 1;
4822 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4823 
4824 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4825 	    IWM_CMD_SYNC, size, cmd);
4826 	free(cmd, M_DEVBUF);
4827 
4828 	return (error);
4829 }
4830 
4831 /*
4832  * ifnet interfaces
4833  */
4834 
4835 static void
4836 iwm_init(struct iwm_softc *sc)
4837 {
4838 	int error;
4839 
4840 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4841 		return;
4842 	}
4843 	sc->sc_generation++;
4844 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4845 
4846 	if ((error = iwm_init_hw(sc)) != 0) {
4847 		printf("iwm_init_hw failed %d\n", error);
4848 		iwm_stop(sc);
4849 		return;
4850 	}
4851 
4852 	/*
4853 	 * Ok, firmware loaded and we are jogging
4854 	 */
4855 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4856 }
4857 
4858 static int
4859 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4860 {
4861 	struct iwm_softc *sc;
4862 	int error;
4863 
4864 	sc = ic->ic_softc;
4865 
4866 	IWM_LOCK(sc);
4867 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4868 		IWM_UNLOCK(sc);
4869 		return (ENXIO);
4870 	}
4871 	error = mbufq_enqueue(&sc->sc_snd, m);
4872 	if (error) {
4873 		IWM_UNLOCK(sc);
4874 		return (error);
4875 	}
4876 	iwm_start(sc);
4877 	IWM_UNLOCK(sc);
4878 	return (0);
4879 }
4880 
4881 /*
4882  * Dequeue packets from sendq and call send.
4883  */
4884 static void
4885 iwm_start(struct iwm_softc *sc)
4886 {
4887 	struct ieee80211_node *ni;
4888 	struct mbuf *m;
4889 	int ac = 0;
4890 
4891 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4892 	while (sc->qfullmsk == 0 &&
4893 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4894 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4895 		if (iwm_tx(sc, m, ni, ac) != 0) {
4896 			if_inc_counter(ni->ni_vap->iv_ifp,
4897 			    IFCOUNTER_OERRORS, 1);
4898 			ieee80211_free_node(ni);
4899 			continue;
4900 		}
4901 		if (sc->sc_tx_timer == 0) {
4902 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4903 			    sc);
4904 		}
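		/*
		 * iwm_watchdog() decrements this once per second; 15 seconds
		 * without a TX completion resetting it triggers a restart.
		 */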
4905 		sc->sc_tx_timer = 15;
4906 	}
4907 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4908 }
4909 
4910 static void
4911 iwm_stop(struct iwm_softc *sc)
4912 {
4913 
4914 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4915 	sc->sc_flags |= IWM_FLAG_STOPPED;
4916 	sc->sc_generation++;
4917 	iwm_led_blink_stop(sc);
4918 	sc->sc_tx_timer = 0;
4919 	iwm_stop_device(sc);
4920 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4921 }
4922 
4923 static void
4924 iwm_watchdog(void *arg)
4925 {
4926 	struct iwm_softc *sc = arg;
4927 	struct ieee80211com *ic = &sc->sc_ic;
4928 
4929 	if (sc->sc_attached == 0)
4930 		return;
4931 
4932 	if (sc->sc_tx_timer > 0) {
4933 		if (--sc->sc_tx_timer == 0) {
4934 			device_printf(sc->sc_dev, "device timeout\n");
4935 #ifdef IWM_DEBUG
4936 			iwm_nic_error(sc);
4937 #endif
4938 			ieee80211_restart_all(ic);
4939 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4940 			return;
4941 		}
4942 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4943 	}
4944 }
4945 
4946 static void
4947 iwm_parent(struct ieee80211com *ic)
4948 {
4949 	struct iwm_softc *sc = ic->ic_softc;
4950 	int startall = 0;
4951 	int rfkill = 0;
4952 
4953 	IWM_LOCK(sc);
4954 	if (ic->ic_nrunning > 0) {
4955 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4956 			iwm_init(sc);
4957 			rfkill = iwm_check_rfkill(sc);
4958 			if (!rfkill)
4959 				startall = 1;
4960 		}
4961 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4962 		iwm_stop(sc);
4963 	IWM_UNLOCK(sc);
4964 	if (startall)
4965 		ieee80211_start_all(ic);
4966 	else if (rfkill)
4967 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
4968 }
4969 
4970 static void
4971 iwm_rftoggle_task(void *arg, int npending __unused)
4972 {
4973 	struct iwm_softc *sc = arg;
4974 	struct ieee80211com *ic = &sc->sc_ic;
4975 	int rfkill;
4976 
4977 	IWM_LOCK(sc);
4978 	rfkill = iwm_check_rfkill(sc);
4979 	IWM_UNLOCK(sc);
4980 	if (rfkill) {
4981 		device_printf(sc->sc_dev,
4982 		    "%s: rfkill switch, disabling interface\n", __func__);
4983 		ieee80211_suspend_all(ic);
4984 		ieee80211_notify_radio(ic, 0);
4985 	} else {
4986 		device_printf(sc->sc_dev,
4987 		    "%s: rfkill cleared, re-enabling interface\n", __func__);
4988 		ieee80211_resume_all(ic);
4989 		ieee80211_notify_radio(ic, 1);
4990 	}
4991 }
4992 
4993 /*
4994  * The interrupt side of things
4995  */
4996 
4997 /*
4998  * error dumping routines are from iwlwifi/mvm/utils.c
4999  */
5000 
5001 /*
5002  * Note: This structure is read from the device with IO accesses,
5003  * and the reading already does the endian conversion. As it is
5004  * read with uint32_t-sized accesses, any members with a different size
5005  * need to be ordered correctly though!
5006  */
5007 struct iwm_error_event_table {
5008 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5009 	uint32_t error_id;		/* type of error */
5010 	uint32_t trm_hw_status0;	/* TRM HW status */
5011 	uint32_t trm_hw_status1;	/* TRM HW status */
5012 	uint32_t blink2;		/* branch link */
5013 	uint32_t ilink1;		/* interrupt link */
5014 	uint32_t ilink2;		/* interrupt link */
5015 	uint32_t data1;		/* error-specific data */
5016 	uint32_t data2;		/* error-specific data */
5017 	uint32_t data3;		/* error-specific data */
5018 	uint32_t bcon_time;		/* beacon timer */
5019 	uint32_t tsf_low;		/* network timestamp function timer */
5020 	uint32_t tsf_hi;		/* network timestamp function timer */
5021 	uint32_t gp1;		/* GP1 timer register */
5022 	uint32_t gp2;		/* GP2 timer register */
5023 	uint32_t fw_rev_type;	/* firmware revision type */
5024 	uint32_t major;		/* uCode version major */
5025 	uint32_t minor;		/* uCode version minor */
5026 	uint32_t hw_ver;		/* HW Silicon version */
5027 	uint32_t brd_ver;		/* HW board version */
5028 	uint32_t log_pc;		/* log program counter */
5029 	uint32_t frame_ptr;		/* frame pointer */
5030 	uint32_t stack_ptr;		/* stack pointer */
5031 	uint32_t hcmd;		/* last host command header */
5032 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5033 				 * rxtx_flag */
5034 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5035 				 * host_flag */
5036 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5037 				 * enc_flag */
5038 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5039 				 * time_flag */
5040 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5041 				 * wico interrupt */
5042 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5043 	uint32_t wait_event;		/* wait event() caller address */
5044 	uint32_t l2p_control;	/* L2pControlField */
5045 	uint32_t l2p_duration;	/* L2pDurationField */
5046 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5047 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5048 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5049 				 * (LMPM_PMG_SEL) */
5050 	uint32_t u_timestamp;	/* date and time of the compilation */
5052 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5053 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5054 
5055 /*
5056  * UMAC error struct - relevant starting from family 8000 chip.
5057  * Note: This structure is read from the device with IO accesses,
5058  * and the reading already does the endian conversion. As it is
5059  * read with u32-sized accesses, any members with a different size
5060  * need to be ordered correctly though!
5061  */
5062 struct iwm_umac_error_event_table {
5063 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5064 	uint32_t error_id;	/* type of error */
5065 	uint32_t blink1;	/* branch link */
5066 	uint32_t blink2;	/* branch link */
5067 	uint32_t ilink1;	/* interrupt link */
5068 	uint32_t ilink2;	/* interrupt link */
5069 	uint32_t data1;		/* error-specific data */
5070 	uint32_t data2;		/* error-specific data */
5071 	uint32_t data3;		/* error-specific data */
5072 	uint32_t umac_major;
5073 	uint32_t umac_minor;
5074 	uint32_t frame_pointer;	/* core register 27 */
5075 	uint32_t stack_pointer;	/* core register 28 */
5076 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5077 	uint32_t nic_isr_pref;	/* ISR status register */
5078 } __packed;
5079 
5080 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5081 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5082 
5083 #ifdef IWM_DEBUG
5084 struct {
5085 	const char *name;
5086 	uint8_t num;
5087 } advanced_lookup[] = {
5088 	{ "NMI_INTERRUPT_WDG", 0x34 },
5089 	{ "SYSASSERT", 0x35 },
5090 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5091 	{ "BAD_COMMAND", 0x38 },
5092 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5093 	{ "FATAL_ERROR", 0x3D },
5094 	{ "NMI_TRM_HW_ERR", 0x46 },
5095 	{ "NMI_INTERRUPT_TRM", 0x4C },
5096 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5097 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5098 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5099 	{ "NMI_INTERRUPT_HOST", 0x66 },
5100 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5101 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5102 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5103 	{ "ADVANCED_SYSASSERT", 0 },
5104 };
5105 
5106 static const char *
5107 iwm_desc_lookup(uint32_t num)
5108 {
5109 	int i;
5110 
5111 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5112 		if (advanced_lookup[i].num == num)
5113 			return advanced_lookup[i].name;
5114 
5115 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5116 	return advanced_lookup[i].name;
5117 }
5118 
5119 static void
5120 iwm_nic_umac_error(struct iwm_softc *sc)
5121 {
5122 	struct iwm_umac_error_event_table table;
5123 	uint32_t base;
5124 
5125 	base = sc->umac_error_event_table;
5126 
5127 	if (base < 0x800000) {
5128 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5129 		    base);
5130 		return;
5131 	}
5132 
5133 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5134 		device_printf(sc->sc_dev, "reading errlog failed\n");
5135 		return;
5136 	}
5137 
5138 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5139 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5140 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5141 		    sc->sc_flags, table.valid);
5142 	}
5143 
5144 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5145 		iwm_desc_lookup(table.error_id));
5146 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5147 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5148 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5149 	    table.ilink1);
5150 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5151 	    table.ilink2);
5152 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5153 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5154 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5155 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5156 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5157 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5158 	    table.frame_pointer);
5159 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5160 	    table.stack_pointer);
5161 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5162 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5163 	    table.nic_isr_pref);
5164 }
5165 
5166 /*
5167  * Support for dumping the error log seemed like a good idea ...
5168  * but it's mostly hex junk and the only sensible thing is the
5169  * hw/ucode revision (which we know anyway).  Since it's here,
5170  * I'll just leave it in, just in case e.g. the Intel guys want to
5171  * help us decipher some "ADVANCED_SYSASSERT" later.
5172  */
5173 static void
5174 iwm_nic_error(struct iwm_softc *sc)
5175 {
5176 	struct iwm_error_event_table table;
5177 	uint32_t base;
5178 
5179 	device_printf(sc->sc_dev, "dumping device error log\n");
5180 	base = sc->error_event_table[0];
5181 	if (base < 0x800000) {
5182 		device_printf(sc->sc_dev,
5183 		    "Invalid error log pointer 0x%08x\n", base);
5184 		return;
5185 	}
5186 
5187 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5188 		device_printf(sc->sc_dev, "reading errlog failed\n");
5189 		return;
5190 	}
5191 
5192 	if (!table.valid) {
5193 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5194 		return;
5195 	}
5196 
5197 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5198 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5199 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5200 		    sc->sc_flags, table.valid);
5201 	}
5202 
5203 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5204 	    iwm_desc_lookup(table.error_id));
5205 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5206 	    table.trm_hw_status0);
5207 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5208 	    table.trm_hw_status1);
5209 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5210 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5211 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5212 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5213 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5214 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5215 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5216 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5217 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5218 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5219 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5220 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5221 	    table.fw_rev_type);
5222 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5223 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5224 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5225 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5226 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5227 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5228 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5229 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5230 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5231 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5232 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5233 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5234 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5235 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5236 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5237 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5238 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5239 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5240 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5241 
5242 	if (sc->umac_error_event_table)
5243 		iwm_nic_umac_error(sc);
5244 }
5245 #endif
5246 
5247 static void
5248 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5249 {
5250 	struct ieee80211com *ic = &sc->sc_ic;
5251 	struct iwm_cmd_response *cresp;
5252 	struct mbuf *m1;
5253 	uint32_t offset = 0;
5254 	uint32_t maxoff = IWM_RBUF_SIZE;
5255 	uint32_t nextoff;
5256 	boolean_t stolen = FALSE;
5257 
5258 #define HAVEROOM(a)	\
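/*
 * A single RX buffer can hold several consecutive packets.
 * HAVEROOM() checks whether another packet header (status word
 * plus command header) could still fit before the end of the
 * buffer, which bounds the walk below.
 */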
5259     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5260 
5261 	while (HAVEROOM(offset)) {
5262 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5263 		    offset);
5264 		int qid, idx, code, len;
5265 
5266 		qid = pkt->hdr.qid;
5267 		idx = pkt->hdr.idx;
5268 
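		/*
		 * Wide command ids combine a group id with the 8-bit
		 * opcode; for wide commands the group id is carried in
		 * what is the flags byte of the legacy header.
		 */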
5269 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5270 
5271 		/*
5272 		 * randomly get these from the firmware, no idea why.
5273 		 * they at least seem harmless, so just ignore them for now
5274 		 */
5275 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5276 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5277 			break;
5278 		}
5279 
5280 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5281 		    "rx packet qid=%d idx=%d type=%x\n",
5282 		    qid & ~0x80, pkt->hdr.idx, code);
5283 
5284 		len = iwm_rx_packet_len(pkt);
5285 		len += sizeof(uint32_t); /* account for status word */
5286 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5287 
5288 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5289 
5290 		switch (code) {
5291 		case IWM_REPLY_RX_PHY_CMD:
5292 			iwm_rx_rx_phy_cmd(sc, pkt);
5293 			break;
5294 
5295 		case IWM_REPLY_RX_MPDU_CMD: {
5296 			/*
5297 			 * If this is the last frame in the RX buffer, we
5298 			 * can directly feed the mbuf to the sharks here.
5299 			 */
5300 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5301 			    struct iwm_rx_packet *, nextoff);
5302 			if (!HAVEROOM(nextoff) ||
5303 			    (nextpkt->hdr.code == 0 &&
5304 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5305 			     nextpkt->hdr.idx == 0) ||
5306 			    (nextpkt->len_n_flags ==
5307 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5308 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5309 					stolen = FALSE;
5310 					/* Make sure we abort the loop */
5311 					nextoff = maxoff;
5312 				}
5313 				break;
5314 			}
5315 
5316 			/*
5317 			 * Use m_copym instead of m_split, because that
5318 			 * makes it easier to keep a valid rx buffer in
5319 			 * the ring, when iwm_rx_mpdu() fails.
5320 			 *
5321 			 * We need to start m_copym() at offset 0, to get the
5322 			 * M_PKTHDR flag preserved.
5323 			 */
5324 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5325 			if (m1) {
5326 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5327 					stolen = TRUE;
5328 				else
5329 					m_freem(m1);
5330 			}
5331 			break;
5332 		}
5333 
5334 		case IWM_TX_CMD:
5335 			iwm_rx_tx_cmd(sc, pkt);
5336 			break;
5337 
5338 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5339 			struct iwm_missed_beacons_notif *resp;
5340 			int missed;
5341 
5342 			/* XXX look at mac_id to determine interface ID */
5343 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5344 
5345 			resp = (void *)pkt->data;
5346 			missed = le32toh(resp->consec_missed_beacons);
5347 
5348 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5349 			    "%s: MISSED_BEACON: mac_id=%d, "
5350 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5351 			    "num_rx=%d\n",
5352 			    __func__,
5353 			    le32toh(resp->mac_id),
5354 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5355 			    le32toh(resp->consec_missed_beacons),
5356 			    le32toh(resp->num_expected_beacons),
5357 			    le32toh(resp->num_recvd_beacons));
5358 
5359 			/* Be paranoid */
5360 			if (vap == NULL)
5361 				break;
5362 
5363 			/* XXX no net80211 locking? */
5364 			if (vap->iv_state == IEEE80211_S_RUN &&
5365 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5366 				if (missed > vap->iv_bmissthreshold) {
5367 					/* XXX bad locking; turn into task */
5368 					IWM_UNLOCK(sc);
5369 					ieee80211_beacon_miss(ic);
5370 					IWM_LOCK(sc);
5371 				}
5372 			}
5373 
5374 			break;
5375 		}
5376 
5377 		case IWM_MFUART_LOAD_NOTIFICATION:
5378 			break;
5379 
5380 		case IWM_ALIVE:
5381 			break;
5382 
5383 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5384 			break;
5385 
5386 		case IWM_STATISTICS_NOTIFICATION:
5387 			iwm_handle_rx_statistics(sc, pkt);
5388 			break;
5389 
5390 		case IWM_NVM_ACCESS_CMD:
5391 		case IWM_MCC_UPDATE_CMD:
5392 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5393 				memcpy(sc->sc_cmd_resp,
5394 				    pkt, sizeof(sc->sc_cmd_resp));
5395 			}
5396 			break;
5397 
5398 		case IWM_MCC_CHUB_UPDATE_CMD: {
5399 			struct iwm_mcc_chub_notif *notif;
5400 			notif = (void *)pkt->data;
5401 
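			/*
			 * The MCC is a two-character ASCII country code
			 * packed into 16 bits, high byte first, as the
			 * unpacking below shows.
			 */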
5402 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5403 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5404 			sc->sc_fw_mcc[2] = '\0';
5405 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5406 			    "fw source %d sent CC '%s'\n",
5407 			    notif->source_id, sc->sc_fw_mcc);
5408 			break;
5409 		}
5410 
5411 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5412 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5413 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5414 			struct iwm_dts_measurement_notif_v1 *notif;
5415 
5416 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5417 				device_printf(sc->sc_dev,
5418 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5419 				break;
5420 			}
5421 			notif = (void *)pkt->data;
5422 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5423 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5424 			    notif->temp);
5425 			break;
5426 		}
5427 
5428 		case IWM_PHY_CONFIGURATION_CMD:
5429 		case IWM_TX_ANT_CONFIGURATION_CMD:
5430 		case IWM_ADD_STA:
5431 		case IWM_MAC_CONTEXT_CMD:
5432 		case IWM_REPLY_SF_CFG_CMD:
5433 		case IWM_POWER_TABLE_CMD:
5434 		case IWM_LTR_CONFIG:
5435 		case IWM_PHY_CONTEXT_CMD:
5436 		case IWM_BINDING_CONTEXT_CMD:
5437 		case IWM_TIME_EVENT_CMD:
5438 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5439 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5440 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5441 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5442 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5443 		case IWM_REPLY_BEACON_FILTERING_CMD:
5444 		case IWM_MAC_PM_POWER_TABLE:
5445 		case IWM_TIME_QUOTA_CMD:
5446 		case IWM_REMOVE_STA:
5447 		case IWM_TXPATH_FLUSH:
5448 		case IWM_LQ_CMD:
5449 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5450 				 IWM_FW_PAGING_BLOCK_CMD):
5451 		case IWM_BT_CONFIG:
5452 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5453 			cresp = (void *)pkt->data;
5454 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5455 				memcpy(sc->sc_cmd_resp,
5456 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5457 			}
5458 			break;
5459 
5460 		/* ignore */
5461 		case IWM_PHY_DB_CMD:
5462 			break;
5463 
5464 		case IWM_INIT_COMPLETE_NOTIF:
5465 			break;
5466 
5467 		case IWM_SCAN_OFFLOAD_COMPLETE:
5468 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5469 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5470 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5471 				ieee80211_runtask(ic, &sc->sc_es_task);
5472 			}
5473 			break;
5474 
5475 		case IWM_SCAN_ITERATION_COMPLETE: {
5476 			break;
5477 		}
5478 
5479 		case IWM_SCAN_COMPLETE_UMAC:
5480 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5481 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5482 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5483 				ieee80211_runtask(ic, &sc->sc_es_task);
5484 			}
5485 			break;
5486 
5487 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5488 #ifdef IWM_DEBUG
5489 			struct iwm_umac_scan_iter_complete_notif *notif;
5490 			notif = (void *)pkt->data;
5491 
5492 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5493 			    "complete, status=0x%x, %d channels scanned\n",
5494 			    notif->status, notif->scanned_channels);
5495 #endif
5496 			break;
5497 		}
5498 
5499 		case IWM_REPLY_ERROR: {
5500 			struct iwm_error_resp *resp;
5501 			resp = (void *)pkt->data;
5502 
5503 			device_printf(sc->sc_dev,
5504 			    "firmware error 0x%x, cmd 0x%x\n",
5505 			    le32toh(resp->error_type),
5506 			    resp->cmd_id);
5507 			break;
5508 		}
5509 
5510 		case IWM_TIME_EVENT_NOTIFICATION:
5511 			iwm_rx_time_event_notif(sc, pkt);
5512 			break;
5513 
5514 		/*
5515 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5516 		 * messages. Just ignore them for now.
5517 		 */
5518 		case IWM_DEBUG_LOG_MSG:
5519 			break;
5520 
5521 		case IWM_MCAST_FILTER_CMD:
5522 			break;
5523 
5524 		case IWM_SCD_QUEUE_CFG: {
5525 #ifdef IWM_DEBUG
5526 			struct iwm_scd_txq_cfg_rsp *rsp;
5527 			rsp = (void *)pkt->data;
5528 
5529 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5530 			    "queue cfg token=0x%x sta_id=%d "
5531 			    "tid=%d scd_queue=%d\n",
5532 			    rsp->token, rsp->sta_id, rsp->tid,
5533 			    rsp->scd_queue);
5534 #endif
5535 			break;
5536 		}
5537 
5538 		default:
5539 			device_printf(sc->sc_dev,
5540 			    "code %x, frame %d/%d %x unhandled\n",
5541 			    code, qid & ~0x80, idx, pkt->len_n_flags);
5542 			break;
5543 		}
5544 
5545 		/*
5546 		 * Why test bit 0x80?  The Linux driver:
5547 		 *
5548 		 * There is one exception:  uCode sets bit 15 when it
5549 		 * originates the response/notification, i.e. when the
5550 		 * response/notification is not a direct response to a
5551 		 * command sent by the driver.  For example, uCode issues
5552 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5553 		 * it is not a direct response to any driver command.
5554 		 *
5555 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5556 		 * uses a slightly different format for pkt->hdr, and "qid"
5557 		 * is actually the upper byte of a two-byte field.
5558 		 */
5559 		if (!(qid & (1 << 7)))
5560 			iwm_cmd_done(sc, pkt);
5561 
5562 		offset = nextoff;
5563 	}
5564 	if (stolen)
5565 		m_freem(m);
5566 #undef HAVEROOM
5567 }
5568 
5569 /*
5570  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5571  * Basic structure from if_iwn
5572  */
5573 static void
5574 iwm_notif_intr(struct iwm_softc *sc)
5575 {
5576 	int count;
5577 	uint32_t wreg;
5578 	uint16_t hw;
5579 
5580 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5581 	    BUS_DMASYNC_POSTREAD);
5582 
5583 	if (sc->cfg->mqrx_supported) {
5584 		count = IWM_RX_MQ_RING_COUNT;
5585 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5586 	} else {
5587 		count = IWM_RX_LEGACY_RING_COUNT;
5588 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5589 	}
5590 
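	/*
	 * closed_rb_num is the 12-bit index of the most recently
	 * closed receive buffer; every entry between rxq.cur and it
	 * is ready to be processed.
	 */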
5591 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5592 
5593 	/*
5594 	 * Process responses
5595 	 */
5596 	while (sc->rxq.cur != hw) {
5597 		struct iwm_rx_ring *ring = &sc->rxq;
5598 		struct iwm_rx_data *data = &ring->data[ring->cur];
5599 
5600 		bus_dmamap_sync(ring->data_dmat, data->map,
5601 		    BUS_DMASYNC_POSTREAD);
5602 
5603 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5604 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5605 		iwm_handle_rxb(sc, data->m);
5606 
5607 		ring->cur = (ring->cur + 1) % count;
5608 	}
5609 
5610 	/*
5611 	 * Tell the firmware that it can reuse the ring entries that
5612 	 * we have just processed.
5613 	 * Seems like the hardware gets upset unless we align
5614 	 * the write by 8??
5615 	 */
5616 	hw = (hw == 0) ? count - 1 : hw - 1;
5617 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5618 }
5619 
5620 static void
5621 iwm_intr(void *arg)
5622 {
5623 	struct iwm_softc *sc = arg;
5624 	int handled = 0;
5625 	int r1, r2;
5626 	int isperiodic = 0;
5627 
5628 	IWM_LOCK(sc);
5629 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5630 
5631 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5632 		uint32_t *ict = sc->ict_dma.vaddr;
5633 		int tmp;
5634 
5635 		tmp = htole32(ict[sc->ict_cur]);
5636 		if (!tmp)
5637 			goto out_ena;
5638 
5639 		/*
5640 		 * ok, there was something.  keep plowing until we have all.
5641 		 */
5642 		r1 = r2 = 0;
5643 		while (tmp) {
5644 			r1 |= tmp;
5645 			ict[sc->ict_cur] = 0;
5646 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5647 			tmp = htole32(ict[sc->ict_cur]);
5648 		}
5649 
5650 		/* this is where the fun begins.  don't ask */
5651 		if (r1 == 0xffffffff)
5652 			r1 = 0;
5653 
5654 		/* i am not expected to understand this */
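		/*
		 * Linux iwlwifi describes this as a hardware bug
		 * workaround: with interrupt coalescing the RX bit
		 * (bit 15 here, bit 31 after the shift below) can read
		 * back as clear, but bits 18-19 stay set in that case,
		 * so either of them implies the RX bit.  The last line
		 * expands the compressed ICT value into CSR_INT layout:
		 * the low byte maps to bits 0-7 and the second byte to
		 * bits 24-31.
		 */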
5655 		if (r1 & 0xc0000)
5656 			r1 |= 0x8000;
5657 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5658 	} else {
5659 		r1 = IWM_READ(sc, IWM_CSR_INT);
5660 		/* "hardware gone" (where, fishing?) */
5661 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5662 			goto out;
5663 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5664 	}
5665 	if (r1 == 0 && r2 == 0) {
5666 		goto out_ena;
5667 	}
5668 
5669 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5670 
5671 	/* Safely ignore these bits for debug checks below */
5672 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5673 
5674 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5675 		int i;
5676 		struct ieee80211com *ic = &sc->sc_ic;
5677 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5678 
5679 #ifdef IWM_DEBUG
5680 		iwm_nic_error(sc);
5681 #endif
5682 		/* Dump driver status (TX and RX rings) while we're here. */
5683 		device_printf(sc->sc_dev, "driver status:\n");
5684 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5685 			struct iwm_tx_ring *ring = &sc->txq[i];
5686 			device_printf(sc->sc_dev,
5687 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5688 			    "queued=%-3d\n",
5689 			    i, ring->qid, ring->cur, ring->queued);
5690 		}
5691 		device_printf(sc->sc_dev,
5692 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5693 		device_printf(sc->sc_dev,
5694 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5695 
5696 		/* Reset our firmware state tracking. */
5697 		sc->sc_firmware_state = 0;
5698 		/* Don't stop the device; just do a VAP restart */
5699 		IWM_UNLOCK(sc);
5700 
5701 		if (vap == NULL) {
5702 			printf("%s: null vap\n", __func__);
5703 			return;
5704 		}
5705 
5706 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5707 		    "restarting\n", __func__, vap->iv_state);
5708 
5709 		ieee80211_restart_all(ic);
5710 		return;
5711 	}
5712 
5713 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5714 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5715 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5716 		iwm_stop(sc);
5717 		goto out;
5718 	}
5719 
5720 	/* firmware chunk loaded */
5721 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5722 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5723 		handled |= IWM_CSR_INT_BIT_FH_TX;
5724 		sc->sc_fw_chunk_done = 1;
5725 		wakeup(&sc->sc_fw);
5726 	}
5727 
5728 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5729 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5730 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5731 	}
5732 
5733 	/*
5734 	 * The Linux driver uses periodic interrupts to avoid races.
5735 	 * We cargo-cult like it's going out of fashion.
5736 	 */
5737 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5738 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5739 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5740 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5741 			IWM_WRITE_1(sc,
5742 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5743 		isperiodic = 1;
5744 	}
5745 
5746 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5747 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5748 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5749 
5750 		iwm_notif_intr(sc);
5751 
5752 		/* enable periodic interrupt, see above */
5753 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5754 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5755 			    IWM_CSR_INT_PERIODIC_ENA);
5756 	}
5757 
5758 	if (__predict_false(r1 & ~handled))
5759 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5760 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5761  out_ena:
5762 	iwm_restore_interrupts(sc);
5763  out:
5764 	IWM_UNLOCK(sc);
5765 	return;
5766 }
5767 
5768 /*
5769  * Autoconf glue-sniffing
5770  */
5771 #define	PCI_VENDOR_INTEL		0x8086
5772 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5773 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5774 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5775 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5776 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5777 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5778 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5779 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5780 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5781 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5782 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5783 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5784 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5785 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5786 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
5787 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5788 
5789 static const struct iwm_devices {
5790 	uint16_t		device;
5791 	const struct iwm_cfg	*cfg;
5792 } iwm_devices[] = {
5793 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5794 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5795 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5796 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5797 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5798 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5799 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5800 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5801 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5802 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5803 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5804 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5805 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5806 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5807 	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5808 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5809 };
5810 
5811 static int
5812 iwm_probe(device_t dev)
5813 {
5814 	int i;
5815 
5816 	for (i = 0; i < nitems(iwm_devices); i++) {
5817 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5818 		    pci_get_device(dev) == iwm_devices[i].device) {
5819 			device_set_desc(dev, iwm_devices[i].cfg->name);
5820 			return (BUS_PROBE_DEFAULT);
5821 		}
5822 	}
5823 
5824 	return (ENXIO);
5825 }
5826 
5827 static int
5828 iwm_dev_check(device_t dev)
5829 {
5830 	struct iwm_softc *sc;
5831 	uint16_t devid;
5832 	int i;
5833 
5834 	sc = device_get_softc(dev);
5835 
5836 	devid = pci_get_device(dev);
5837 	for (i = 0; i < nitems(iwm_devices); i++) {
5838 		if (iwm_devices[i].device == devid) {
5839 			sc->cfg = iwm_devices[i].cfg;
5840 			return (0);
5841 		}
5842 	}
5843 	device_printf(dev, "unknown adapter type\n");
5844 	return ENXIO;
5845 }
5846 
5847 /* PCI registers */
5848 #define PCI_CFG_RETRY_TIMEOUT	0x041
5849 
5850 static int
5851 iwm_pci_attach(device_t dev)
5852 {
5853 	struct iwm_softc *sc;
5854 	int count, error, rid;
5855 	uint16_t reg;
5856 
5857 	sc = device_get_softc(dev);
5858 
5859 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5860 	 * PCI Tx retries from interfering with C3 CPU state */
5861 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5862 
5863 	/* Enable bus-mastering and hardware bug workaround. */
5864 	pci_enable_busmaster(dev);
5865 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5866 	/* if !MSI */
5867 	if (reg & PCIM_STATUS_INTxSTATE) {
5868 		reg &= ~PCIM_STATUS_INTxSTATE;
5869 	}
5870 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5871 
5872 	rid = PCIR_BAR(0);
5873 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5874 	    RF_ACTIVE);
5875 	if (sc->sc_mem == NULL) {
5876 		device_printf(sc->sc_dev, "can't map mem space\n");
5877 		return (ENXIO);
5878 	}
5879 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5880 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5881 
5882 	/* Install interrupt handler. */
5883 	count = 1;
5884 	rid = 0;
5885 	if (pci_alloc_msi(dev, &count) == 0)
5886 		rid = 1;
5887 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5888 	    (rid != 0 ? 0 : RF_SHAREABLE));
5889 	if (sc->sc_irq == NULL) {
5890 		device_printf(dev, "can't map interrupt\n");
5891 			return (ENXIO);
5892 		return (ENXIO);
5893 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5894 	    NULL, iwm_intr, sc, &sc->sc_ih);
5895 	if (error != 0) {
5896 		device_printf(dev, "can't establish interrupt\n");
5897 		return (error);
5898 	}
5899 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5900 
5901 	return (0);
5902 }
5903 
5904 static void
5905 iwm_pci_detach(device_t dev)
5906 {
5907 	struct iwm_softc *sc = device_get_softc(dev);
5908 
5909 	if (sc->sc_irq != NULL) {
5910 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5911 		bus_release_resource(dev, SYS_RES_IRQ,
5912 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5913 		pci_release_msi(dev);
5914 	}
5915 	if (sc->sc_mem != NULL)
5916 		bus_release_resource(dev, SYS_RES_MEMORY,
5917 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5918 }
5919 
5920 static int
5921 iwm_attach(device_t dev)
5922 {
5923 	struct iwm_softc *sc = device_get_softc(dev);
5924 	struct ieee80211com *ic = &sc->sc_ic;
5925 	int error;
5926 	int txq_i, i;
5927 
5928 	sc->sc_dev = dev;
5929 	sc->sc_attached = 1;
5930 	IWM_LOCK_INIT(sc);
5931 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5932 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5933 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5934 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5935 	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
5936 
5937 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5938 	    taskqueue_thread_enqueue, &sc->sc_tq);
5939 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5940 	if (error != 0) {
5941 		device_printf(dev, "can't start taskq thread, error %d\n",
5942 		    error);
5943 		goto fail;
5944 	}
5945 
5946 	error = iwm_dev_check(dev);
5947 	if (error != 0)
5948 		goto fail;
5949 
5950 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5951 	if (sc->sc_notif_wait == NULL) {
5952 		device_printf(dev, "failed to init notification wait struct\n");
5953 		goto fail;
5954 	}
5955 
5956 	sc->sf_state = IWM_SF_UNINIT;
5957 
5958 	/* Init phy db */
5959 	sc->sc_phy_db = iwm_phy_db_init(sc);
5960 	if (!sc->sc_phy_db) {
5961 		device_printf(dev, "Cannot init phy_db\n");
5962 		goto fail;
5963 	}
5964 
5965 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5966 	sc->last_ebs_successful = TRUE;
5967 
5968 	/* PCI attach */
5969 	error = iwm_pci_attach(dev);
5970 	if (error != 0)
5971 		goto fail;
5972 
5973 	sc->sc_wantresp = -1;
5974 
5975 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5976 	/*
5977 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5978 	 * changed, and now the revision step also includes bit 0-1 (no more
5979 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5980 	 * in the old format.
5981 	 */
5982 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
5983 		int ret;
5984 		uint32_t hw_step;
5985 
5986 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5987 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5988 
5989 		if (iwm_prepare_card_hw(sc) != 0) {
5990 			device_printf(dev, "could not initialize hardware\n");
5991 			goto fail;
5992 		}
5993 
5994 		/*
5995 		 * In order to recognize C step the driver should read the
5996 		 * chip version id located at the AUX bus MISC address.
5997 		 */
5998 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5999 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6000 		DELAY(2);
6001 
6002 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6003 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6004 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6005 				   25000);
6006 		if (!ret) {
6007 			device_printf(sc->sc_dev,
6008 			    "Failed to wake up the nic\n");
6009 			goto fail;
6010 		}
6011 
6012 		if (iwm_nic_lock(sc)) {
6013 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6014 			hw_step |= IWM_ENABLE_WFPM;
6015 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6016 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6017 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6018 			if (hw_step == 0x3)
6019 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6020 						(IWM_SILICON_C_STEP << 2);
6021 			iwm_nic_unlock(sc);
6022 		} else {
6023 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6024 			goto fail;
6025 		}
6026 	}
6027 
6028 	/* special-case 7265D, which has the same PCI IDs. */
6029 	if (sc->cfg == &iwm7265_cfg &&
6030 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6031 		sc->cfg = &iwm7265d_cfg;
6032 	}
6033 
6034 	/* Allocate DMA memory for firmware transfers. */
6035 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6036 		device_printf(dev, "could not allocate memory for firmware\n");
6037 		goto fail;
6038 	}
6039 
6040 	/* Allocate "Keep Warm" page. */
6041 	if ((error = iwm_alloc_kw(sc)) != 0) {
6042 		device_printf(dev, "could not allocate keep warm page\n");
6043 		goto fail;
6044 	}
6045 
6046 	/* We use ICT interrupts */
6047 	if ((error = iwm_alloc_ict(sc)) != 0) {
6048 		device_printf(dev, "could not allocate ICT table\n");
6049 		goto fail;
6050 	}
6051 
6052 	/* Allocate TX scheduler "rings". */
6053 	if ((error = iwm_alloc_sched(sc)) != 0) {
6054 		device_printf(dev, "could not allocate TX scheduler rings\n");
6055 		goto fail;
6056 	}
6057 
6058 	/* Allocate TX rings */
6059 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6060 		if ((error = iwm_alloc_tx_ring(sc,
6061 		    &sc->txq[txq_i], txq_i)) != 0) {
6062 			device_printf(dev,
6063 			    "could not allocate TX ring %d\n",
6064 			    txq_i);
6065 			goto fail;
6066 		}
6067 	}
6068 
6069 	/* Allocate RX ring. */
6070 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6071 		device_printf(dev, "could not allocate RX ring\n");
6072 		goto fail;
6073 	}
6074 
6075 	/* Clear pending interrupts. */
6076 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6077 
6078 	ic->ic_softc = sc;
6079 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6080 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6081 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6082 
6083 	/* Set device capabilities. */
6084 	ic->ic_caps =
6085 	    IEEE80211_C_STA |
6086 	    IEEE80211_C_WPA |		/* WPA/RSN */
6087 	    IEEE80211_C_WME |
6088 	    IEEE80211_C_PMGT |
6089 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6090 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6091 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6092 	    ;
6093 	/* Advertise full-offload scanning */
6094 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6095 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6096 		sc->sc_phyctxt[i].id = i;
6097 		sc->sc_phyctxt[i].color = 0;
6098 		sc->sc_phyctxt[i].ref = 0;
6099 		sc->sc_phyctxt[i].channel = NULL;
6100 	}
6101 
6102 	/* Default noise floor */
6103 	sc->sc_noise = -96;
6104 
6105 	/* Max RSSI */
6106 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6107 
6108 #ifdef IWM_DEBUG
6109 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6110 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6111 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6112 #endif
6113 
6114 	error = iwm_read_firmware(sc);
6115 	if (error) {
6116 		goto fail;
6117 	} else if (sc->sc_fw.fw_fp == NULL) {
6118 		/*
6119 		 * XXX Add a solution for properly deferring firmware load
6120 		 *     during bootup.
6121 		 */
6122 		goto fail;
6123 	} else {
6124 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6125 		sc->sc_preinit_hook.ich_arg = sc;
6126 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6127 			device_printf(dev,
6128 			    "config_intrhook_establish failed\n");
6129 			goto fail;
6130 		}
6131 	}
6132 
6133 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6134 	    "<-%s\n", __func__);
6135 
6136 	return 0;
6137 
6138 	/* Free allocated memory if something failed during attachment. */
6139 fail:
6140 	iwm_detach_local(sc, 0);
6141 
6142 	return ENXIO;
6143 }
6144 
6145 static int
6146 iwm_is_valid_ether_addr(uint8_t *addr)
6147 {
6148 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6149 
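	/* Reject multicast (low bit of first octet) and all-zero addresses. */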
6150 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6151 		return (FALSE);
6152 
6153 	return (TRUE);
6154 }
6155 
6156 static int
6157 iwm_wme_update(struct ieee80211com *ic)
6158 {
6159 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6160 	struct iwm_softc *sc = ic->ic_softc;
6161 	struct chanAccParams chp;
6162 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6163 	struct iwm_vap *ivp = IWM_VAP(vap);
6164 	struct iwm_node *in;
6165 	struct wmeParams tmp[WME_NUM_AC];
6166 	int aci, error;
6167 
6168 	if (vap == NULL)
6169 		return (0);
6170 
6171 	ieee80211_wme_ic_getparams(ic, &chp);
6172 
6173 	IEEE80211_LOCK(ic);
6174 	for (aci = 0; aci < WME_NUM_AC; aci++)
6175 		tmp[aci] = chp.cap_wmeParams[aci];
6176 	IEEE80211_UNLOCK(ic);
6177 
6178 	IWM_LOCK(sc);
6179 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6180 		const struct wmeParams *ac = &tmp[aci];
6181 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6182 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6183 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6184 		ivp->queue_params[aci].edca_txop =
6185 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6186 	}
6187 	ivp->have_wme = TRUE;
6188 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6189 		in = IWM_NODE(vap->iv_bss);
6190 		if (in->in_assoc) {
6191 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6192 				device_printf(sc->sc_dev,
6193 				    "%s: failed to update MAC\n", __func__);
6194 			}
6195 		}
6196 	}
6197 	IWM_UNLOCK(sc);
6198 
6199 	return (0);
6200 #undef IWM_EXP2
6201 }
6202 
6203 static void
6204 iwm_preinit(void *arg)
6205 {
6206 	struct iwm_softc *sc = arg;
6207 	device_t dev = sc->sc_dev;
6208 	struct ieee80211com *ic = &sc->sc_ic;
6209 	int error;
6210 
6211 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6212 	    "->%s\n", __func__);
6213 
6214 	IWM_LOCK(sc);
6215 	if ((error = iwm_start_hw(sc)) != 0) {
6216 		device_printf(dev, "could not initialize hardware\n");
6217 		IWM_UNLOCK(sc);
6218 		goto fail;
6219 	}
6220 
6221 	error = iwm_run_init_ucode(sc, 1);
6222 	iwm_stop_device(sc);
6223 	if (error) {
6224 		IWM_UNLOCK(sc);
6225 		goto fail;
6226 	}
6227 	device_printf(dev,
6228 	    "hw rev 0x%x, fw ver %s, address %s\n",
6229 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6230 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6231 
6232 	/* not all hardware can do 5GHz band */
6233 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6234 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6235 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6236 	IWM_UNLOCK(sc);
6237 
6238 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6239 	    ic->ic_channels);
6240 
6241 	/*
6242 	 * At this point we've committed - if we fail to do setup,
6243 	 * we now also have to tear down the net80211 state.
6244 	 */
6245 	ieee80211_ifattach(ic);
6246 	ic->ic_vap_create = iwm_vap_create;
6247 	ic->ic_vap_delete = iwm_vap_delete;
6248 	ic->ic_raw_xmit = iwm_raw_xmit;
6249 	ic->ic_node_alloc = iwm_node_alloc;
6250 	ic->ic_scan_start = iwm_scan_start;
6251 	ic->ic_scan_end = iwm_scan_end;
6252 	ic->ic_update_mcast = iwm_update_mcast;
6253 	ic->ic_getradiocaps = iwm_init_channel_map;
6254 	ic->ic_set_channel = iwm_set_channel;
6255 	ic->ic_scan_curchan = iwm_scan_curchan;
6256 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6257 	ic->ic_wme.wme_update = iwm_wme_update;
6258 	ic->ic_parent = iwm_parent;
6259 	ic->ic_transmit = iwm_transmit;
6260 	iwm_radiotap_attach(sc);
6261 	if (bootverbose)
6262 		ieee80211_announce(ic);
6263 
6264 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6265 	    "<-%s\n", __func__);
6266 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6267 
6268 	return;
6269 fail:
6270 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6271 	iwm_detach_local(sc, 0);
6272 }
6273 
6274 /*
6275  * Attach the interface to 802.11 radiotap.
6276  */
6277 static void
6278 iwm_radiotap_attach(struct iwm_softc *sc)
6279 {
6280 	struct ieee80211com *ic = &sc->sc_ic;
6281 
6282 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6283 	    "->%s begin\n", __func__);
6284 	ieee80211_radiotap_attach(ic,
6285 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6286 	    IWM_TX_RADIOTAP_PRESENT,
6287 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6288 	    IWM_RX_RADIOTAP_PRESENT);
6289 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6290 	    "->%s end\n", __func__);
6291 }
6292 
6293 static struct ieee80211vap *
6294 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6295     enum ieee80211_opmode opmode, int flags,
6296     const uint8_t bssid[IEEE80211_ADDR_LEN],
6297     const uint8_t mac[IEEE80211_ADDR_LEN])
6298 {
6299 	struct iwm_vap *ivp;
6300 	struct ieee80211vap *vap;
6301 
6302 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6303 		return NULL;
6304 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6305 	vap = &ivp->iv_vap;
6306 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6307 	vap->iv_bmissthreshold = 10;            /* override default */
6308 	/* Override with driver methods. */
6309 	ivp->iv_newstate = vap->iv_newstate;
6310 	vap->iv_newstate = iwm_newstate;
6311 
6312 	ivp->id = IWM_DEFAULT_MACID;
6313 	ivp->color = IWM_DEFAULT_COLOR;
6314 
6315 	ivp->have_wme = FALSE;
6316 	ivp->ps_disabled = FALSE;
6317 
6318 	ieee80211_ratectl_init(vap);
6319 	/* Complete setup. */
6320 	ieee80211_vap_attach(vap, ieee80211_media_change,
6321 	    ieee80211_media_status, mac);
6322 	ic->ic_opmode = opmode;
6323 
6324 	return vap;
6325 }
6326 
6327 static void
6328 iwm_vap_delete(struct ieee80211vap *vap)
6329 {
6330 	struct iwm_vap *ivp = IWM_VAP(vap);
6331 
6332 	ieee80211_ratectl_deinit(vap);
6333 	ieee80211_vap_detach(vap);
6334 	free(ivp, M_80211_VAP);
6335 }
6336 
6337 static void
6338 iwm_xmit_queue_drain(struct iwm_softc *sc)
6339 {
6340 	struct mbuf *m;
6341 	struct ieee80211_node *ni;
6342 
6343 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6344 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6345 		ieee80211_free_node(ni);
6346 		m_freem(m);
6347 	}
6348 }
6349 
6350 static void
6351 iwm_scan_start(struct ieee80211com *ic)
6352 {
6353 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6354 	struct iwm_softc *sc = ic->ic_softc;
6355 	int error;
6356 
6357 	IWM_LOCK(sc);
6358 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6359 		/* This should not be possible */
6360 		device_printf(sc->sc_dev,
6361 		    "%s: Previous scan not completed yet\n", __func__);
6362 	}
6363 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6364 		error = iwm_umac_scan(sc);
6365 	else
6366 		error = iwm_lmac_scan(sc);
6367 	if (error != 0) {
6368 		device_printf(sc->sc_dev, "could not initiate scan\n");
6369 		IWM_UNLOCK(sc);
6370 		ieee80211_cancel_scan(vap);
6371 	} else {
6372 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6373 		iwm_led_blink_start(sc);
6374 		IWM_UNLOCK(sc);
6375 	}
6376 }
6377 
6378 static void
6379 iwm_scan_end(struct ieee80211com *ic)
6380 {
6381 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6382 	struct iwm_softc *sc = ic->ic_softc;
6383 
6384 	IWM_LOCK(sc);
6385 	iwm_led_blink_stop(sc);
6386 	if (vap->iv_state == IEEE80211_S_RUN)
6387 		iwm_led_enable(sc);
6388 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6389 		/*
6390 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine, because
6391 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6392 		 * taskqueue.
6393 		 */
6394 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6395 		iwm_scan_stop_wait(sc);
6396 	}
6397 	IWM_UNLOCK(sc);
6398 
6399 	/*
6400 	 * Make sure we don't race if sc_es_task is still enqueued here.
6401 	 * This is to make sure that it won't call ieee80211_scan_done
6402 	 * when we have already started the next scan.
6403 	 */
6404 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6405 }
6406 
6407 static void
6408 iwm_update_mcast(struct ieee80211com *ic)
6409 {
6410 }
6411 
6412 static void
6413 iwm_set_channel(struct ieee80211com *ic)
6414 {
6415 }
6416 
6417 static void
6418 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6419 {
6420 }
6421 
6422 static void
6423 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6424 {
6425 }
6426 
6427 void
6428 iwm_init_task(void *arg1)
6429 {
6430 	struct iwm_softc *sc = arg1;
6431 
6432 	IWM_LOCK(sc);
6433 	while (sc->sc_flags & IWM_FLAG_BUSY)
6434 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6435 	sc->sc_flags |= IWM_FLAG_BUSY;
6436 	iwm_stop(sc);
6437 	if (sc->sc_ic.ic_nrunning > 0)
6438 		iwm_init(sc);
6439 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6440 	wakeup(&sc->sc_flags);
6441 	IWM_UNLOCK(sc);
6442 }
6443 
6444 static int
6445 iwm_resume(device_t dev)
6446 {
6447 	struct iwm_softc *sc = device_get_softc(dev);
6448 	int do_reinit = 0;
6449 
6450 	/*
6451 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6452 	 * PCI Tx retries from interfering with C3 CPU state.
6453 	 */
6454 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6455 
6456 	if (!sc->sc_attached)
6457 		return 0;
6458 
6459 	iwm_init_task(device_get_softc(dev));
6460 
6461 	IWM_LOCK(sc);
6462 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6463 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6464 		do_reinit = 1;
6465 	}
6466 	IWM_UNLOCK(sc);
6467 
6468 	if (do_reinit)
6469 		ieee80211_resume_all(&sc->sc_ic);
6470 
6471 	return 0;
6472 }
6473 
6474 static int
6475 iwm_suspend(device_t dev)
6476 {
6477 	int do_stop = 0;
6478 	struct iwm_softc *sc = device_get_softc(dev);
6479 
6480 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6481 
6482 	if (!sc->sc_attached)
6483 		return (0);
6484 
6485 	ieee80211_suspend_all(&sc->sc_ic);
6486 
6487 	if (do_stop) {
6488 		IWM_LOCK(sc);
6489 		iwm_stop(sc);
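		/*
		 * Reuse the SCANNING flag to record that the device
		 * was running; iwm_resume() checks it to decide
		 * whether to restart net80211.
		 */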
6490 		sc->sc_flags |= IWM_FLAG_SCANNING;
6491 		IWM_UNLOCK(sc);
6492 	}
6493 
6494 	return (0);
6495 }
6496 
6497 static int
6498 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6499 {
6500 	struct iwm_fw_info *fw = &sc->sc_fw;
6501 	device_t dev = sc->sc_dev;
6502 	int i;
6503 
6504 	if (!sc->sc_attached)
6505 		return 0;
6506 	sc->sc_attached = 0;
6507 	if (do_net80211) {
6508 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6509 	}
6510 	iwm_stop_device(sc);
6511 	taskqueue_drain_all(sc->sc_tq);
6512 	taskqueue_free(sc->sc_tq);
6513 	if (do_net80211) {
6514 		IWM_LOCK(sc);
6515 		iwm_xmit_queue_drain(sc);
6516 		IWM_UNLOCK(sc);
6517 		ieee80211_ifdetach(&sc->sc_ic);
6518 	}
6519 	callout_drain(&sc->sc_led_blink_to);
6520 	callout_drain(&sc->sc_watchdog_to);
6521 
6522 	iwm_phy_db_free(sc->sc_phy_db);
6523 	sc->sc_phy_db = NULL;
6524 
6525 	iwm_free_nvm_data(sc->nvm_data);
6526 
6527 	/* Free descriptor rings */
6528 	iwm_free_rx_ring(sc, &sc->rxq);
6529 	for (i = 0; i < nitems(sc->txq); i++)
6530 		iwm_free_tx_ring(sc, &sc->txq[i]);
6531 
6532 	/* Free firmware */
6533 	if (fw->fw_fp != NULL)
6534 		iwm_fw_info_free(fw);
6535 
6536 	/* Free scheduler */
6537 	iwm_dma_contig_free(&sc->sched_dma);
6538 	iwm_dma_contig_free(&sc->ict_dma);
6539 	iwm_dma_contig_free(&sc->kw_dma);
6540 	iwm_dma_contig_free(&sc->fw_dma);
6541 
6542 	iwm_free_fw_paging(sc);
6543 
6544 	/* Finished with the hardware - detach things */
6545 	iwm_pci_detach(dev);
6546 
6547 	if (sc->sc_notif_wait != NULL) {
6548 		iwm_notification_wait_free(sc->sc_notif_wait);
6549 		sc->sc_notif_wait = NULL;
6550 	}
6551 
6552 	IWM_LOCK_DESTROY(sc);
6553 
6554 	return (0);
6555 }
6556 
6557 static int
6558 iwm_detach(device_t dev)
6559 {
6560 	struct iwm_softc *sc = device_get_softc(dev);
6561 
6562 	return (iwm_detach_local(sc, 1));
6563 }
6564 
6565 static device_method_t iwm_pci_methods[] = {
6566         /* Device interface */
6567         DEVMETHOD(device_probe,         iwm_probe),
6568         DEVMETHOD(device_attach,        iwm_attach),
6569         DEVMETHOD(device_detach,        iwm_detach),
6570         DEVMETHOD(device_suspend,       iwm_suspend),
6571         DEVMETHOD(device_resume,        iwm_resume),
6572 
6573         DEVMETHOD_END
6574 };
6575 
6576 static driver_t iwm_pci_driver = {
6577         "iwm",
6578         iwm_pci_methods,
6579         sizeof (struct iwm_softc)
6580 };
6581 
6582 DRIVER_MODULE(iwm, pci, iwm_pci_driver, NULL, NULL);
6583 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6584     iwm_devices, nitems(iwm_devices));
6585 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6586 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6587 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6588