xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 5e49712ca8fef53262f2f6769c119f27e0e175ee)
1 /*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 #include "opt_wlan.h"
107 #include "opt_iwm.h"
108 
109 #include <sys/param.h>
110 #include <sys/bus.h>
111 #include <sys/conf.h>
112 #include <sys/endian.h>
113 #include <sys/firmware.h>
114 #include <sys/kernel.h>
115 #include <sys/malloc.h>
116 #include <sys/mbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/module.h>
119 #include <sys/proc.h>
120 #include <sys/rman.h>
121 #include <sys/socket.h>
122 #include <sys/sockio.h>
123 #include <sys/sysctl.h>
124 #include <sys/linker.h>
125 
126 #include <machine/bus.h>
127 #include <machine/endian.h>
128 #include <machine/resource.h>
129 
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcireg.h>
132 
133 #include <net/bpf.h>
134 
135 #include <net/if.h>
136 #include <net/if_var.h>
137 #include <net/if_arp.h>
138 #include <net/if_dl.h>
139 #include <net/if_media.h>
140 #include <net/if_types.h>
141 
142 #include <netinet/in.h>
143 #include <netinet/in_systm.h>
144 #include <netinet/if_ether.h>
145 #include <netinet/ip.h>
146 
147 #include <net80211/ieee80211_var.h>
148 #include <net80211/ieee80211_regdomain.h>
149 #include <net80211/ieee80211_ratectl.h>
150 #include <net80211/ieee80211_radiotap.h>
151 
152 #include <dev/iwm/if_iwmreg.h>
153 #include <dev/iwm/if_iwmvar.h>
154 #include <dev/iwm/if_iwm_config.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165 #include <dev/iwm/if_iwm_sf.h>
166 #include <dev/iwm/if_iwm_sta.h>
167 
168 #include <dev/iwm/if_iwm_pcie_trans.h>
169 #include <dev/iwm/if_iwm_led.h>
170 #include <dev/iwm/if_iwm_fw.h>
171 
172 /* From DragonflyBSD */
173 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
174 
175 const uint8_t iwm_nvm_channels[] = {
176 	/* 2.4 GHz */
177 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
178 	/* 5 GHz */
179 	36, 40, 44, 48, 52, 56, 60, 64,
180 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
181 	149, 153, 157, 161, 165
182 };
183 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
184     "IWM_NUM_CHANNELS is too small");
185 
186 const uint8_t iwm_nvm_channels_8000[] = {
187 	/* 2.4 GHz */
188 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
189 	/* 5 GHz */
190 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
191 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
192 	149, 153, 157, 161, 165, 169, 173, 177, 181
193 };
194 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
195     "IWM_NUM_CHANNELS_8000 is too small");
196 
197 #define IWM_NUM_2GHZ_CHANNELS	14
198 #define IWM_N_HW_ADDR_MASK	0xF
199 
200 const struct iwm_rate {
201 	uint16_t rate;
202 	uint8_t plcp;
203 	uint8_t ht_plcp;
204 } iwm_rates[] = {
205 		/* Legacy */		/* HT */
206 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
207 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
208 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
209 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
210 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
211 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
212 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
213 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
214 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
215 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
216 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
217 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
218 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
219 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
220 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
221 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
222 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
223 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
224 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
225 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
226 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
227 };
228 #define IWM_RIDX_CCK	0
229 #define IWM_RIDX_OFDM	4
230 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
231 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
232 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
233 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
234 
235 /* Convert an MCS index into an iwm_rates[] index. */
236 const int iwm_mcs2ridx[] = {
237 	IWM_RATE_MCS_0_INDEX,
238 	IWM_RATE_MCS_1_INDEX,
239 	IWM_RATE_MCS_2_INDEX,
240 	IWM_RATE_MCS_3_INDEX,
241 	IWM_RATE_MCS_4_INDEX,
242 	IWM_RATE_MCS_5_INDEX,
243 	IWM_RATE_MCS_6_INDEX,
244 	IWM_RATE_MCS_7_INDEX,
245 	IWM_RATE_MCS_8_INDEX,
246 	IWM_RATE_MCS_9_INDEX,
247 	IWM_RATE_MCS_10_INDEX,
248 	IWM_RATE_MCS_11_INDEX,
249 	IWM_RATE_MCS_12_INDEX,
250 	IWM_RATE_MCS_13_INDEX,
251 	IWM_RATE_MCS_14_INDEX,
252 	IWM_RATE_MCS_15_INDEX,
253 };
254 
255 struct iwm_nvm_section {
256 	uint16_t length;
257 	uint8_t *data;
258 };
259 
260 #define IWM_UCODE_ALIVE_TIMEOUT	hz
261 #define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
262 
263 struct iwm_alive_data {
264 	int valid;
265 	uint32_t scd_base_addr;
266 };
267 
268 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
269 static int	iwm_firmware_store_section(struct iwm_softc *,
270                                            enum iwm_ucode_type,
271                                            const uint8_t *, size_t);
272 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
273 static void	iwm_fw_info_free(struct iwm_fw_info *);
274 static int	iwm_read_firmware(struct iwm_softc *);
275 static int	iwm_alloc_fwmem(struct iwm_softc *);
276 static int	iwm_alloc_sched(struct iwm_softc *);
277 static int	iwm_alloc_kw(struct iwm_softc *);
278 static int	iwm_alloc_ict(struct iwm_softc *);
279 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
281 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
282 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
283                                   int);
284 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
285 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
286 static void	iwm_enable_interrupts(struct iwm_softc *);
287 static void	iwm_restore_interrupts(struct iwm_softc *);
288 static void	iwm_disable_interrupts(struct iwm_softc *);
289 static void	iwm_ict_reset(struct iwm_softc *);
290 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
291 static void	iwm_stop_device(struct iwm_softc *);
292 static void	iwm_nic_config(struct iwm_softc *);
293 static int	iwm_nic_rx_init(struct iwm_softc *);
294 static int	iwm_nic_tx_init(struct iwm_softc *);
295 static int	iwm_nic_init(struct iwm_softc *);
296 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
297 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
298                                    uint16_t, uint8_t *, uint16_t *);
299 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
300 				     uint16_t *, uint32_t);
301 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
302 static void	iwm_add_channel_band(struct iwm_softc *,
303 		    struct ieee80211_channel[], int, int *, int, size_t,
304 		    const uint8_t[]);
305 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
306 		    struct ieee80211_channel[]);
307 static struct iwm_nvm_data *
308 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
309 			   const uint16_t *, const uint16_t *,
310 			   const uint16_t *, const uint16_t *,
311 			   const uint16_t *);
312 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
313 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
314 					       struct iwm_nvm_data *,
315 					       const uint16_t *,
316 					       const uint16_t *);
317 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
318 			    const uint16_t *);
319 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
320 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
321 				  const uint16_t *);
322 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
323 				   const uint16_t *);
324 static void	iwm_set_radio_cfg(const struct iwm_softc *,
325 				  struct iwm_nvm_data *, uint32_t);
326 static struct iwm_nvm_data *
327 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
328 static int	iwm_nvm_init(struct iwm_softc *);
329 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
330 				      const struct iwm_fw_desc *);
331 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
332 					     bus_addr_t, uint32_t);
333 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
334 						const struct iwm_fw_img *,
335 						int, int *);
336 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
337 					   const struct iwm_fw_img *,
338 					   int, int *);
339 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
340 					       const struct iwm_fw_img *);
341 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
342 					  const struct iwm_fw_img *);
343 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
344 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
345 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
346 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
347                                               enum iwm_ucode_type);
348 static int	iwm_run_init_ucode(struct iwm_softc *, int);
349 static int	iwm_config_ltr(struct iwm_softc *sc);
350 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
351 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
352                                       struct iwm_rx_packet *);
353 static int	iwm_get_noise(struct iwm_softc *,
354 		    const struct iwm_statistics_rx_non_phy *);
355 static void	iwm_handle_rx_statistics(struct iwm_softc *,
356 		    struct iwm_rx_packet *);
357 static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
358 		    uint32_t, bool);
359 static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
360                                          struct iwm_rx_packet *,
361 				         struct iwm_node *);
362 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
363 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
364 #if 0
365 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
366                                  uint16_t);
367 #endif
368 static const struct iwm_rate *
369 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
370 			struct mbuf *, struct iwm_tx_cmd *);
371 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
372                        struct ieee80211_node *, int);
373 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
374 			     const struct ieee80211_bpf_params *);
375 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
376 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
377 static struct ieee80211_node *
378 		iwm_node_alloc(struct ieee80211vap *,
379 		               const uint8_t[IEEE80211_ADDR_LEN]);
380 static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
381 static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
382 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
383 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
384 static void	iwm_endscan_cb(void *, int);
385 static int	iwm_send_bt_init_conf(struct iwm_softc *);
386 static boolean_t iwm_is_lar_supported(struct iwm_softc *);
387 static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
388 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
389 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
390 static int	iwm_init_hw(struct iwm_softc *);
391 static void	iwm_init(struct iwm_softc *);
392 static void	iwm_start(struct iwm_softc *);
393 static void	iwm_stop(struct iwm_softc *);
394 static void	iwm_watchdog(void *);
395 static void	iwm_parent(struct ieee80211com *);
396 #ifdef IWM_DEBUG
397 static const char *
398 		iwm_desc_lookup(uint32_t);
399 static void	iwm_nic_error(struct iwm_softc *);
400 static void	iwm_nic_umac_error(struct iwm_softc *);
401 #endif
402 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
403 static void	iwm_notif_intr(struct iwm_softc *);
404 static void	iwm_intr(void *);
405 static int	iwm_attach(device_t);
406 static int	iwm_is_valid_ether_addr(uint8_t *);
407 static void	iwm_preinit(void *);
408 static int	iwm_detach_local(struct iwm_softc *sc, int);
409 static void	iwm_init_task(void *);
410 static void	iwm_radiotap_attach(struct iwm_softc *);
411 static struct ieee80211vap *
412 		iwm_vap_create(struct ieee80211com *,
413 		               const char [IFNAMSIZ], int,
414 		               enum ieee80211_opmode, int,
415 		               const uint8_t [IEEE80211_ADDR_LEN],
416 		               const uint8_t [IEEE80211_ADDR_LEN]);
417 static void	iwm_vap_delete(struct ieee80211vap *);
418 static void	iwm_xmit_queue_drain(struct iwm_softc *);
419 static void	iwm_scan_start(struct ieee80211com *);
420 static void	iwm_scan_end(struct ieee80211com *);
421 static void	iwm_update_mcast(struct ieee80211com *);
422 static void	iwm_set_channel(struct ieee80211com *);
423 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
424 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
425 static int	iwm_detach(device_t);
426 
427 static int	iwm_lar_disable = 0;
428 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
429 
430 /*
431  * Firmware parser.
432  */
433 
434 static int
iwm_store_cscheme(struct iwm_softc * sc,const uint8_t * data,size_t dlen)435 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
436 {
437 	const struct iwm_fw_cscheme_list *l = (const void *)data;
438 
439 	if (dlen < sizeof(*l) ||
440 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
441 		return EINVAL;
442 
443 	/* we don't actually store anything for now, always use s/w crypto */
444 
445 	return 0;
446 }
447 
448 static int
iwm_firmware_store_section(struct iwm_softc * sc,enum iwm_ucode_type type,const uint8_t * data,size_t dlen)449 iwm_firmware_store_section(struct iwm_softc *sc,
450     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
451 {
452 	struct iwm_fw_img *fws;
453 	struct iwm_fw_desc *fwone;
454 
455 	if (type >= IWM_UCODE_TYPE_MAX)
456 		return EINVAL;
457 	if (dlen < sizeof(uint32_t))
458 		return EINVAL;
459 
460 	fws = &sc->sc_fw.img[type];
461 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
462 		return EINVAL;
463 
464 	fwone = &fws->sec[fws->fw_count];
465 
466 	/* first 32bit are device load offset */
467 	memcpy(&fwone->offset, data, sizeof(uint32_t));
468 
469 	/* rest is data */
470 	fwone->data = data + sizeof(uint32_t);
471 	fwone->len = dlen - sizeof(uint32_t);
472 
473 	fws->fw_count++;
474 
475 	return 0;
476 }
477 
478 #define IWM_DEFAULT_SCAN_CHANNELS 40
479 
480 /* iwlwifi: iwl-drv.c */
481 struct iwm_tlv_calib_data {
482 	uint32_t ucode_type;
483 	struct iwm_tlv_calib_ctrl calib;
484 } __packed;
485 
486 static int
iwm_set_default_calib(struct iwm_softc * sc,const void * data)487 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
488 {
489 	const struct iwm_tlv_calib_data *def_calib = data;
490 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
491 
492 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
493 		device_printf(sc->sc_dev,
494 		    "Wrong ucode_type %u for default "
495 		    "calibration.\n", ucode_type);
496 		return EINVAL;
497 	}
498 
499 	sc->sc_default_calib[ucode_type].flow_trigger =
500 	    def_calib->calib.flow_trigger;
501 	sc->sc_default_calib[ucode_type].event_trigger =
502 	    def_calib->calib.event_trigger;
503 
504 	return 0;
505 }
506 
507 static int
iwm_set_ucode_api_flags(struct iwm_softc * sc,const uint8_t * data,struct iwm_ucode_capabilities * capa)508 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
509 			struct iwm_ucode_capabilities *capa)
510 {
511 	const struct iwm_ucode_api *ucode_api = (const void *)data;
512 	uint32_t api_index = le32toh(ucode_api->api_index);
513 	uint32_t api_flags = le32toh(ucode_api->api_flags);
514 	int i;
515 
516 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
517 		device_printf(sc->sc_dev,
518 		    "api flags index %d larger than supported by driver\n",
519 		    api_index);
520 		/* don't return an error so we can load FW that has more bits */
521 		return 0;
522 	}
523 
524 	for (i = 0; i < 32; i++) {
525 		if (api_flags & (1U << i))
526 			setbit(capa->enabled_api, i + 32 * api_index);
527 	}
528 
529 	return 0;
530 }
531 
532 static int
iwm_set_ucode_capabilities(struct iwm_softc * sc,const uint8_t * data,struct iwm_ucode_capabilities * capa)533 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
534 			   struct iwm_ucode_capabilities *capa)
535 {
536 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
537 	uint32_t api_index = le32toh(ucode_capa->api_index);
538 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
539 	int i;
540 
541 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
542 		device_printf(sc->sc_dev,
543 		    "capa flags index %d larger than supported by driver\n",
544 		    api_index);
545 		/* don't return an error so we can load FW that has more bits */
546 		return 0;
547 	}
548 
549 	for (i = 0; i < 32; i++) {
550 		if (api_flags & (1U << i))
551 			setbit(capa->enabled_capa, i + 32 * api_index);
552 	}
553 
554 	return 0;
555 }
556 
557 static void
iwm_fw_info_free(struct iwm_fw_info * fw)558 iwm_fw_info_free(struct iwm_fw_info *fw)
559 {
560 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
561 	fw->fw_fp = NULL;
562 	memset(fw->img, 0, sizeof(fw->img));
563 }
564 
565 static int
iwm_read_firmware(struct iwm_softc * sc)566 iwm_read_firmware(struct iwm_softc *sc)
567 {
568 	struct iwm_fw_info *fw = &sc->sc_fw;
569 	const struct iwm_tlv_ucode_header *uhdr;
570 	const struct iwm_ucode_tlv *tlv;
571 	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
572 	enum iwm_ucode_tlv_type tlv_type;
573 	const struct firmware *fwp;
574 	const uint8_t *data;
575 	uint32_t tlv_len;
576 	uint32_t usniffer_img;
577 	const uint8_t *tlv_data;
578 	uint32_t paging_mem_size;
579 	int num_of_cpus;
580 	int error = 0;
581 	size_t len;
582 
583 	/*
584 	 * Load firmware into driver memory.
585 	 * fw_fp will be set.
586 	 */
587 	fwp = firmware_get(sc->cfg->fw_name);
588 	if (fwp == NULL) {
589 		device_printf(sc->sc_dev,
590 		    "could not read firmware %s (error %d)\n",
591 		    sc->cfg->fw_name, error);
592 		goto out;
593 	}
594 	fw->fw_fp = fwp;
595 
596 	/* (Re-)Initialize default values. */
597 	capa->flags = 0;
598 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
599 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
600 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
601 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
602 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
603 
604 	/*
605 	 * Parse firmware contents
606 	 */
607 
608 	uhdr = (const void *)fw->fw_fp->data;
609 	if (*(const uint32_t *)fw->fw_fp->data != 0
610 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
611 		device_printf(sc->sc_dev, "invalid firmware %s\n",
612 		    sc->cfg->fw_name);
613 		error = EINVAL;
614 		goto out;
615 	}
616 
617 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
618 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
619 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
620 	    IWM_UCODE_API(le32toh(uhdr->ver)));
621 	data = uhdr->data;
622 	len = fw->fw_fp->datasize - sizeof(*uhdr);
623 
624 	while (len >= sizeof(*tlv)) {
625 		len -= sizeof(*tlv);
626 		tlv = (const void *)data;
627 
628 		tlv_len = le32toh(tlv->length);
629 		tlv_type = le32toh(tlv->type);
630 		tlv_data = tlv->data;
631 
632 		if (len < tlv_len) {
633 			device_printf(sc->sc_dev,
634 			    "firmware too short: %zu bytes\n",
635 			    len);
636 			error = EINVAL;
637 			goto parse_out;
638 		}
639 		len -= roundup2(tlv_len, 4);
640 		data += sizeof(*tlv) + roundup2(tlv_len, 4);
641 
642 		switch ((int)tlv_type) {
643 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
644 			if (tlv_len != sizeof(uint32_t)) {
645 				device_printf(sc->sc_dev,
646 				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
647 				    __func__, tlv_len);
648 				error = EINVAL;
649 				goto parse_out;
650 			}
651 			capa->max_probe_length =
652 			    le32_to_cpup((const uint32_t *)tlv_data);
653 			/* limit it to something sensible */
654 			if (capa->max_probe_length >
655 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
656 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
657 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
658 				    "ridiculous\n", __func__);
659 				error = EINVAL;
660 				goto parse_out;
661 			}
662 			break;
663 		case IWM_UCODE_TLV_PAN:
664 			if (tlv_len) {
665 				device_printf(sc->sc_dev,
666 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
667 				    __func__, tlv_len);
668 				error = EINVAL;
669 				goto parse_out;
670 			}
671 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
672 			break;
673 		case IWM_UCODE_TLV_FLAGS:
674 			if (tlv_len < sizeof(uint32_t)) {
675 				device_printf(sc->sc_dev,
676 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
677 				    __func__, tlv_len);
678 				error = EINVAL;
679 				goto parse_out;
680 			}
681 			if (tlv_len % sizeof(uint32_t)) {
682 				device_printf(sc->sc_dev,
683 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
684 				    __func__, tlv_len);
685 				error = EINVAL;
686 				goto parse_out;
687 			}
688 			/*
689 			 * Apparently there can be many flags, but Linux driver
690 			 * parses only the first one, and so do we.
691 			 *
692 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
693 			 * Intentional or a bug?  Observations from
694 			 * current firmware file:
695 			 *  1) TLV_PAN is parsed first
696 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
697 			 * ==> this resets TLV_PAN to itself... hnnnk
698 			 */
699 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
700 			break;
701 		case IWM_UCODE_TLV_CSCHEME:
702 			if ((error = iwm_store_cscheme(sc,
703 			    tlv_data, tlv_len)) != 0) {
704 				device_printf(sc->sc_dev,
705 				    "%s: iwm_store_cscheme(): returned %d\n",
706 				    __func__, error);
707 				goto parse_out;
708 			}
709 			break;
710 		case IWM_UCODE_TLV_NUM_OF_CPU:
711 			if (tlv_len != sizeof(uint32_t)) {
712 				device_printf(sc->sc_dev,
713 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
714 				    __func__, tlv_len);
715 				error = EINVAL;
716 				goto parse_out;
717 			}
718 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
719 			if (num_of_cpus == 2) {
720 				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
721 					TRUE;
722 				fw->img[IWM_UCODE_INIT].is_dual_cpus =
723 					TRUE;
724 				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
725 					TRUE;
726 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
727 				device_printf(sc->sc_dev,
728 				    "%s: Driver supports only 1 or 2 CPUs\n",
729 				    __func__);
730 				error = EINVAL;
731 				goto parse_out;
732 			}
733 			break;
734 		case IWM_UCODE_TLV_SEC_RT:
735 			if ((error = iwm_firmware_store_section(sc,
736 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
737 				device_printf(sc->sc_dev,
738 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
739 				    __func__, error);
740 				goto parse_out;
741 			}
742 			break;
743 		case IWM_UCODE_TLV_SEC_INIT:
744 			if ((error = iwm_firmware_store_section(sc,
745 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
746 				device_printf(sc->sc_dev,
747 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
748 				    __func__, error);
749 				goto parse_out;
750 			}
751 			break;
752 		case IWM_UCODE_TLV_SEC_WOWLAN:
753 			if ((error = iwm_firmware_store_section(sc,
754 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
755 				device_printf(sc->sc_dev,
756 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
757 				    __func__, error);
758 				goto parse_out;
759 			}
760 			break;
761 		case IWM_UCODE_TLV_DEF_CALIB:
762 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
763 				device_printf(sc->sc_dev,
764 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
765 				    __func__, tlv_len,
766 				    sizeof(struct iwm_tlv_calib_data));
767 				error = EINVAL;
768 				goto parse_out;
769 			}
770 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
771 				device_printf(sc->sc_dev,
772 				    "%s: iwm_set_default_calib() failed: %d\n",
773 				    __func__, error);
774 				goto parse_out;
775 			}
776 			break;
777 		case IWM_UCODE_TLV_PHY_SKU:
778 			if (tlv_len != sizeof(uint32_t)) {
779 				error = EINVAL;
780 				device_printf(sc->sc_dev,
781 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
782 				    __func__, tlv_len);
783 				goto parse_out;
784 			}
785 			sc->sc_fw.phy_config =
786 			    le32_to_cpup((const uint32_t *)tlv_data);
787 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
788 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
789 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
790 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
791 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
792 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
793 			break;
794 
795 		case IWM_UCODE_TLV_API_CHANGES_SET: {
796 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
797 				error = EINVAL;
798 				goto parse_out;
799 			}
800 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
801 				error = EINVAL;
802 				goto parse_out;
803 			}
804 			break;
805 		}
806 
807 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
808 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
809 				error = EINVAL;
810 				goto parse_out;
811 			}
812 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
813 				error = EINVAL;
814 				goto parse_out;
815 			}
816 			break;
817 		}
818 
819 		case IWM_UCODE_TLV_CMD_VERSIONS:
820 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
821 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
822 			/* ignore, not used by current driver */
823 			break;
824 
825 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
826 			if ((error = iwm_firmware_store_section(sc,
827 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
828 			    tlv_len)) != 0)
829 				goto parse_out;
830 			break;
831 
832 		case IWM_UCODE_TLV_PAGING:
833 			if (tlv_len != sizeof(uint32_t)) {
834 				error = EINVAL;
835 				goto parse_out;
836 			}
837 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
838 
839 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
840 			    "%s: Paging: paging enabled (size = %u bytes)\n",
841 			    __func__, paging_mem_size);
842 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
843 				device_printf(sc->sc_dev,
844 					"%s: Paging: driver supports up to %u bytes for paging image\n",
845 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
846 				error = EINVAL;
847 				goto out;
848 			}
849 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
850 				device_printf(sc->sc_dev,
851 				    "%s: Paging: image isn't multiple %u\n",
852 				    __func__, IWM_FW_PAGING_SIZE);
853 				error = EINVAL;
854 				goto out;
855 			}
856 
857 			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
858 			    paging_mem_size;
859 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
860 			sc->sc_fw.img[usniffer_img].paging_mem_size =
861 			    paging_mem_size;
862 			break;
863 
864 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
865 			if (tlv_len != sizeof(uint32_t)) {
866 				error = EINVAL;
867 				goto parse_out;
868 			}
869 			capa->n_scan_channels =
870 			    le32_to_cpup((const uint32_t *)tlv_data);
871 			break;
872 
873 		case IWM_UCODE_TLV_FW_VERSION:
874 			if (tlv_len != sizeof(uint32_t) * 3) {
875 				error = EINVAL;
876 				goto parse_out;
877 			}
878 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
879 			    "%u.%u.%u",
880 			    le32toh(((const uint32_t *)tlv_data)[0]),
881 			    le32toh(((const uint32_t *)tlv_data)[1]),
882 			    le32toh(((const uint32_t *)tlv_data)[2]));
883 			break;
884 
885 		case IWM_UCODE_TLV_FW_MEM_SEG:
886 			break;
887 
888 		default:
889 			device_printf(sc->sc_dev,
890 			    "%s: unknown firmware section %d, abort\n",
891 			    __func__, tlv_type);
892 			error = EINVAL;
893 			goto parse_out;
894 		}
895 	}
896 
897 	KASSERT(error == 0, ("unhandled error"));
898 
899  parse_out:
900 	if (error) {
901 		device_printf(sc->sc_dev, "firmware parse error %d, "
902 		    "section type %d\n", error, tlv_type);
903 	}
904 
905  out:
906 	if (error) {
907 		if (fw->fw_fp != NULL)
908 			iwm_fw_info_free(fw);
909 	}
910 
911 	return error;
912 }
913 
914 /*
915  * DMA resource routines
916  */
917 
918 /* fwmem is used to load firmware onto the card */
919 static int
iwm_alloc_fwmem(struct iwm_softc * sc)920 iwm_alloc_fwmem(struct iwm_softc *sc)
921 {
922 	/* Must be aligned on a 16-byte boundary. */
923 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
924 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
925 }
926 
927 /* tx scheduler rings.  not used? */
928 static int
iwm_alloc_sched(struct iwm_softc * sc)929 iwm_alloc_sched(struct iwm_softc *sc)
930 {
931 	/* TX scheduler rings must be aligned on a 1KB boundary. */
932 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
933 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
934 }
935 
936 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
937 static int
iwm_alloc_kw(struct iwm_softc * sc)938 iwm_alloc_kw(struct iwm_softc *sc)
939 {
940 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
941 }
942 
943 /* interrupt cause table */
944 static int
iwm_alloc_ict(struct iwm_softc * sc)945 iwm_alloc_ict(struct iwm_softc *sc)
946 {
947 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
948 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
949 }
950 
951 static int
iwm_alloc_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)952 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
953 {
954 	bus_size_t size;
955 	size_t descsz;
956 	int count, i, error;
957 
958 	ring->cur = 0;
959 	if (sc->cfg->mqrx_supported) {
960 		count = IWM_RX_MQ_RING_COUNT;
961 		descsz = sizeof(uint64_t);
962 	} else {
963 		count = IWM_RX_LEGACY_RING_COUNT;
964 		descsz = sizeof(uint32_t);
965 	}
966 
967 	/* Allocate RX descriptors (256-byte aligned). */
968 	size = count * descsz;
969 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
970 	    256);
971 	if (error != 0) {
972 		device_printf(sc->sc_dev,
973 		    "could not allocate RX ring DMA memory\n");
974 		goto fail;
975 	}
976 	ring->desc = ring->free_desc_dma.vaddr;
977 
978 	/* Allocate RX status area (16-byte aligned). */
979 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
980 	    sizeof(*ring->stat), 16);
981 	if (error != 0) {
982 		device_printf(sc->sc_dev,
983 		    "could not allocate RX status DMA memory\n");
984 		goto fail;
985 	}
986 	ring->stat = ring->stat_dma.vaddr;
987 
988 	if (sc->cfg->mqrx_supported) {
989 		size = count * sizeof(uint32_t);
990 		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
991 		    size, 256);
992 		if (error != 0) {
993 			device_printf(sc->sc_dev,
994 			    "could not allocate RX ring DMA memory\n");
995 			goto fail;
996 		}
997 	}
998 
999         /* Create RX buffer DMA tag. */
1000         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1001             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1002             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1003         if (error != 0) {
1004                 device_printf(sc->sc_dev,
1005                     "%s: could not create RX buf DMA tag, error %d\n",
1006                     __func__, error);
1007                 goto fail;
1008         }
1009 
1010 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1011 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1012 	if (error != 0) {
1013 		device_printf(sc->sc_dev,
1014 		    "%s: could not create RX buf DMA map, error %d\n",
1015 		    __func__, error);
1016 		goto fail;
1017 	}
1018 
1019 	/*
1020 	 * Allocate and map RX buffers.
1021 	 */
1022 	for (i = 0; i < count; i++) {
1023 		struct iwm_rx_data *data = &ring->data[i];
1024 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1025 		if (error != 0) {
1026 			device_printf(sc->sc_dev,
1027 			    "%s: could not create RX buf DMA map, error %d\n",
1028 			    __func__, error);
1029 			goto fail;
1030 		}
1031 		data->m = NULL;
1032 
1033 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1034 			goto fail;
1035 		}
1036 	}
1037 	return 0;
1038 
1039 fail:	iwm_free_rx_ring(sc, ring);
1040 	return error;
1041 }
1042 
1043 static void
iwm_reset_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1044 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1045 {
1046 	/* Reset the ring state */
1047 	ring->cur = 0;
1048 
1049 	/*
1050 	 * The hw rx ring index in shared memory must also be cleared,
1051 	 * otherwise the discrepancy can cause reprocessing chaos.
1052 	 */
1053 	if (sc->rxq.stat)
1054 		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1055 }
1056 
1057 static void
iwm_free_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1058 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1059 {
1060 	int count, i;
1061 
1062 	iwm_dma_contig_free(&ring->free_desc_dma);
1063 	iwm_dma_contig_free(&ring->stat_dma);
1064 	iwm_dma_contig_free(&ring->used_desc_dma);
1065 
1066 	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
1067 	    IWM_RX_LEGACY_RING_COUNT;
1068 
1069 	for (i = 0; i < count; i++) {
1070 		struct iwm_rx_data *data = &ring->data[i];
1071 
1072 		if (data->m != NULL) {
1073 			bus_dmamap_sync(ring->data_dmat, data->map,
1074 			    BUS_DMASYNC_POSTREAD);
1075 			bus_dmamap_unload(ring->data_dmat, data->map);
1076 			m_freem(data->m);
1077 			data->m = NULL;
1078 		}
1079 		if (data->map != NULL) {
1080 			bus_dmamap_destroy(ring->data_dmat, data->map);
1081 			data->map = NULL;
1082 		}
1083 	}
1084 	if (ring->spare_map != NULL) {
1085 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1086 		ring->spare_map = NULL;
1087 	}
1088 	if (ring->data_dmat != NULL) {
1089 		bus_dma_tag_destroy(ring->data_dmat);
1090 		ring->data_dmat = NULL;
1091 	}
1092 }
1093 
1094 static int
iwm_alloc_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring,int qid)1095 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1096 {
1097 	bus_addr_t paddr;
1098 	bus_size_t size;
1099 	size_t maxsize;
1100 	int nsegments;
1101 	int i, error;
1102 
1103 	ring->qid = qid;
1104 	ring->queued = 0;
1105 	ring->cur = 0;
1106 
1107 	/* Allocate TX descriptors (256-byte aligned). */
1108 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1109 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1110 	if (error != 0) {
1111 		device_printf(sc->sc_dev,
1112 		    "could not allocate TX ring DMA memory\n");
1113 		goto fail;
1114 	}
1115 	ring->desc = ring->desc_dma.vaddr;
1116 
1117 	/*
1118 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1119 	 * to allocate commands space for other rings.
1120 	 */
1121 	if (qid > IWM_CMD_QUEUE)
1122 		return 0;
1123 
1124 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1125 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1126 	if (error != 0) {
1127 		device_printf(sc->sc_dev,
1128 		    "could not allocate TX cmd DMA memory\n");
1129 		goto fail;
1130 	}
1131 	ring->cmd = ring->cmd_dma.vaddr;
1132 
1133 	/* FW commands may require more mapped space than packets. */
1134 	if (qid == IWM_CMD_QUEUE) {
1135 		maxsize = IWM_RBUF_SIZE;
1136 		nsegments = 1;
1137 	} else {
1138 		maxsize = MCLBYTES;
1139 		nsegments = IWM_MAX_SCATTER - 2;
1140 	}
1141 
1142 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1143 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1144             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1145 	if (error != 0) {
1146 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1147 		goto fail;
1148 	}
1149 
1150 	paddr = ring->cmd_dma.paddr;
1151 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1152 		struct iwm_tx_data *data = &ring->data[i];
1153 
1154 		data->cmd_paddr = paddr;
1155 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1156 		    + offsetof(struct iwm_tx_cmd, scratch);
1157 		paddr += sizeof(struct iwm_device_cmd);
1158 
1159 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1160 		if (error != 0) {
1161 			device_printf(sc->sc_dev,
1162 			    "could not create TX buf DMA map\n");
1163 			goto fail;
1164 		}
1165 	}
1166 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1167 	    ("invalid physical address"));
1168 	return 0;
1169 
1170 fail:	iwm_free_tx_ring(sc, ring);
1171 	return error;
1172 }
1173 
1174 static void
iwm_reset_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring)1175 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1176 {
1177 	int i;
1178 
1179 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1180 		struct iwm_tx_data *data = &ring->data[i];
1181 
1182 		if (data->m != NULL) {
1183 			bus_dmamap_sync(ring->data_dmat, data->map,
1184 			    BUS_DMASYNC_POSTWRITE);
1185 			bus_dmamap_unload(ring->data_dmat, data->map);
1186 			m_freem(data->m);
1187 			data->m = NULL;
1188 		}
1189 	}
1190 	/* Clear TX descriptors. */
1191 	memset(ring->desc, 0, ring->desc_dma.size);
1192 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1193 	    BUS_DMASYNC_PREWRITE);
1194 	sc->qfullmsk &= ~(1 << ring->qid);
1195 	ring->queued = 0;
1196 	ring->cur = 0;
1197 
1198 	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1199 		iwm_pcie_clear_cmd_in_flight(sc);
1200 }
1201 
1202 static void
iwm_free_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring)1203 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1204 {
1205 	int i;
1206 
1207 	iwm_dma_contig_free(&ring->desc_dma);
1208 	iwm_dma_contig_free(&ring->cmd_dma);
1209 
1210 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1211 		struct iwm_tx_data *data = &ring->data[i];
1212 
1213 		if (data->m != NULL) {
1214 			bus_dmamap_sync(ring->data_dmat, data->map,
1215 			    BUS_DMASYNC_POSTWRITE);
1216 			bus_dmamap_unload(ring->data_dmat, data->map);
1217 			m_freem(data->m);
1218 			data->m = NULL;
1219 		}
1220 		if (data->map != NULL) {
1221 			bus_dmamap_destroy(ring->data_dmat, data->map);
1222 			data->map = NULL;
1223 		}
1224 	}
1225 	if (ring->data_dmat != NULL) {
1226 		bus_dma_tag_destroy(ring->data_dmat);
1227 		ring->data_dmat = NULL;
1228 	}
1229 }
1230 
1231 /*
1232  * High-level hardware frobbing routines
1233  */
1234 
1235 static void
iwm_enable_interrupts(struct iwm_softc * sc)1236 iwm_enable_interrupts(struct iwm_softc *sc)
1237 {
1238 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1239 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1240 }
1241 
1242 static void
iwm_restore_interrupts(struct iwm_softc * sc)1243 iwm_restore_interrupts(struct iwm_softc *sc)
1244 {
1245 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1246 }
1247 
1248 static void
iwm_disable_interrupts(struct iwm_softc * sc)1249 iwm_disable_interrupts(struct iwm_softc *sc)
1250 {
1251 	/* disable interrupts */
1252 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1253 
1254 	/* acknowledge all interrupts */
1255 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1256 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1257 }
1258 
1259 static void
iwm_ict_reset(struct iwm_softc * sc)1260 iwm_ict_reset(struct iwm_softc *sc)
1261 {
1262 	iwm_disable_interrupts(sc);
1263 
1264 	/* Reset ICT table. */
1265 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1266 	sc->ict_cur = 0;
1267 
1268 	/* Set physical address of ICT table (4KB aligned). */
1269 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1270 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1271 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1272 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1273 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1274 
1275 	/* Switch to ICT interrupt mode in driver. */
1276 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1277 
1278 	/* Re-enable interrupts. */
1279 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1280 	iwm_enable_interrupts(sc);
1281 }
1282 
1283 /* iwlwifi pcie/trans.c */
1284 
1285 /*
1286  * Since this .. hard-resets things, it's time to actually
1287  * mark the first vap (if any) as having no mac context.
1288  * It's annoying, but since the driver is potentially being
1289  * stop/start'ed whilst active (thanks openbsd port!) we
1290  * have to correctly track this.
1291  */
1292 static void
iwm_stop_device(struct iwm_softc * sc)1293 iwm_stop_device(struct iwm_softc *sc)
1294 {
1295 	struct ieee80211com *ic = &sc->sc_ic;
1296 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1297 	int chnl, qid;
1298 	uint32_t mask = 0;
1299 
1300 	/* tell the device to stop sending interrupts */
1301 	iwm_disable_interrupts(sc);
1302 
1303 	/*
1304 	 * FreeBSD-local: mark the first vap as not-uploaded,
1305 	 * so the next transition through auth/assoc
1306 	 * will correctly populate the MAC context.
1307 	 */
1308 	if (vap) {
1309 		struct iwm_vap *iv = IWM_VAP(vap);
1310 		iv->phy_ctxt = NULL;
1311 		iv->is_uploaded = 0;
1312 	}
1313 	sc->sc_firmware_state = 0;
1314 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1315 
1316 	/* device going down, Stop using ICT table */
1317 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1318 
1319 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1320 
1321 	if (iwm_nic_lock(sc)) {
1322 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1323 
1324 		/* Stop each Tx DMA channel */
1325 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1326 			IWM_WRITE(sc,
1327 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1328 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1329 		}
1330 
1331 		/* Wait for DMA channels to be idle */
1332 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1333 		    5000)) {
1334 			device_printf(sc->sc_dev,
1335 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1336 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1337 		}
1338 		iwm_nic_unlock(sc);
1339 	}
1340 	iwm_pcie_rx_stop(sc);
1341 
1342 	/* Stop RX ring. */
1343 	iwm_reset_rx_ring(sc, &sc->rxq);
1344 
1345 	/* Reset all TX rings. */
1346 	for (qid = 0; qid < nitems(sc->txq); qid++)
1347 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1348 
1349 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1350 		/* Power-down device's busmaster DMA clocks */
1351 		if (iwm_nic_lock(sc)) {
1352 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1353 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1354 			iwm_nic_unlock(sc);
1355 		}
1356 		DELAY(5);
1357 	}
1358 
1359 	/* Make sure (redundant) we've released our request to stay awake */
1360 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1361 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1362 
1363 	/* Stop the device, and put it in low power state */
1364 	iwm_apm_stop(sc);
1365 
1366 	/* stop and reset the on-board processor */
1367 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1368 	DELAY(5000);
1369 
1370 	/*
1371 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1372 	 */
1373 	iwm_disable_interrupts(sc);
1374 
1375 	/*
1376 	 * Even if we stop the HW, we still want the RF kill
1377 	 * interrupt
1378 	 */
1379 	iwm_enable_rfkill_int(sc);
1380 	iwm_check_rfkill(sc);
1381 
1382 	iwm_prepare_card_hw(sc);
1383 }
1384 
1385 /* iwlwifi: mvm/ops.c */
1386 static void
iwm_nic_config(struct iwm_softc * sc)1387 iwm_nic_config(struct iwm_softc *sc)
1388 {
1389 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1390 	uint32_t reg_val = 0;
1391 	uint32_t phy_config = iwm_get_phy_config(sc);
1392 
1393 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1394 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1395 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1396 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1397 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1398 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1399 
1400 	/* SKU control */
1401 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1402 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1403 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1404 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1405 
1406 	/* radio configuration */
1407 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1408 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1409 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1410 
1411 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
1412 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
1413 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
1414 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
1415 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
1416 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
1417 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1418 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
1419 	    reg_val);
1420 
1421 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1422 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1423 	    radio_cfg_step, radio_cfg_dash);
1424 
1425 	/*
1426 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1427 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1428 	 * to lose ownership and not being able to obtain it back.
1429 	 */
1430 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1431 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1432 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1433 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1434 	}
1435 }
1436 
1437 static int
iwm_nic_rx_mq_init(struct iwm_softc * sc)1438 iwm_nic_rx_mq_init(struct iwm_softc *sc)
1439 {
1440 	int enabled;
1441 
1442 	if (!iwm_nic_lock(sc))
1443 		return EBUSY;
1444 
1445 	/* Stop RX DMA. */
1446 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1447 	/* Disable RX used and free queue operation. */
1448 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
1449 
1450 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
1451 	    sc->rxq.free_desc_dma.paddr);
1452 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
1453 	    sc->rxq.used_desc_dma.paddr);
1454 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
1455 	    sc->rxq.stat_dma.paddr);
1456 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
1457 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
1458 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
1459 
1460 	/* We configure only queue 0 for now. */
1461 	enabled = ((1 << 0) << 16) | (1 << 0);
1462 
1463 	/* Enable RX DMA, 4KB buffer size. */
1464 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
1465 	    IWM_RFH_DMA_EN_ENABLE_VAL |
1466 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
1467 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
1468 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
1469 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
1470 
1471 	/* Enable RX DMA snooping. */
1472 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
1473 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
1474 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1475 	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1476 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1477 
1478 	/* Enable the configured queue(s). */
1479 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
1480 
1481 	iwm_nic_unlock(sc);
1482 
1483 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1484 
1485 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
1486 
1487 	return (0);
1488 }
1489 
1490 static int
iwm_nic_rx_legacy_init(struct iwm_softc * sc)1491 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1492 {
1493 
1494 	/* Stop Rx DMA */
1495 	iwm_pcie_rx_stop(sc);
1496 
1497 	if (!iwm_nic_lock(sc))
1498 		return EBUSY;
1499 
1500 	/* reset and flush pointers */
1501 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1502 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1503 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1504 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1505 
1506 	/* Set physical address of RX ring (256-byte aligned). */
1507 	IWM_WRITE(sc,
1508 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1509 	    sc->rxq.free_desc_dma.paddr >> 8);
1510 
1511 	/* Set physical address of RX status (16-byte aligned). */
1512 	IWM_WRITE(sc,
1513 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1514 
1515 	/* Enable Rx DMA
1516 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1517 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1518 	 *      the credit mechanism in 5000 HW RX FIFO
1519 	 * Direct rx interrupts to hosts
1520 	 * Rx buffer size 4 or 8k or 12k
1521 	 * RB timeout 0x10
1522 	 * 256 RBDs
1523 	 */
1524 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1525 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1526 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1527 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1528 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1529 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1530 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1531 
1532 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1533 
1534 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1535 	if (sc->cfg->host_interrupt_operation_mode)
1536 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1537 
1538 	iwm_nic_unlock(sc);
1539 
1540 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1541 
1542 	return 0;
1543 }
1544 
1545 static int
iwm_nic_rx_init(struct iwm_softc * sc)1546 iwm_nic_rx_init(struct iwm_softc *sc)
1547 {
1548 	if (sc->cfg->mqrx_supported)
1549 		return iwm_nic_rx_mq_init(sc);
1550 	else
1551 		return iwm_nic_rx_legacy_init(sc);
1552 }
1553 
1554 static int
iwm_nic_tx_init(struct iwm_softc * sc)1555 iwm_nic_tx_init(struct iwm_softc *sc)
1556 {
1557 	int qid;
1558 
1559 	if (!iwm_nic_lock(sc))
1560 		return EBUSY;
1561 
1562 	/* Deactivate TX scheduler. */
1563 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1564 
1565 	/* Set physical address of "keep warm" page (16-byte aligned). */
1566 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1567 
1568 	/* Initialize TX rings. */
1569 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1570 		struct iwm_tx_ring *txq = &sc->txq[qid];
1571 
1572 		/* Set physical address of TX ring (256-byte aligned). */
1573 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1574 		    txq->desc_dma.paddr >> 8);
1575 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1576 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1577 		    __func__,
1578 		    qid, txq->desc,
1579 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1580 	}
1581 
1582 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
1583 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
1584 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
1585 
1586 	iwm_nic_unlock(sc);
1587 
1588 	return 0;
1589 }
1590 
1591 static int
iwm_nic_init(struct iwm_softc * sc)1592 iwm_nic_init(struct iwm_softc *sc)
1593 {
1594 	int error;
1595 
1596 	iwm_apm_init(sc);
1597 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1598 		iwm_set_pwr(sc);
1599 
1600 	iwm_nic_config(sc);
1601 
1602 	if ((error = iwm_nic_rx_init(sc)) != 0)
1603 		return error;
1604 
1605 	/*
1606 	 * Ditto for TX, from iwn
1607 	 */
1608 	if ((error = iwm_nic_tx_init(sc)) != 0)
1609 		return error;
1610 
1611 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1612 	    "%s: shadow registers enabled\n", __func__);
1613 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1614 
1615 	return 0;
1616 }
1617 
1618 int
iwm_enable_txq(struct iwm_softc * sc,int sta_id,int qid,int fifo)1619 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1620 {
1621 	int qmsk;
1622 
1623 	qmsk = 1 << qid;
1624 
1625 	if (!iwm_nic_lock(sc)) {
1626 		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1627 		    __func__, qid);
1628 		return EBUSY;
1629 	}
1630 
1631 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1632 
1633 	if (qid == IWM_CMD_QUEUE) {
1634 		/* Disable the scheduler. */
1635 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);
1636 
1637 		/* Stop the TX queue prior to configuration. */
1638 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1639 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1640 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1641 
1642 		iwm_nic_unlock(sc);
1643 
1644 		/* Disable aggregations for this queue. */
1645 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);
1646 
1647 		if (!iwm_nic_lock(sc)) {
1648 			device_printf(sc->sc_dev,
1649 			    "%s: cannot enable txq %d\n", __func__, qid);
1650 			return EBUSY;
1651 		}
1652 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1653 		iwm_nic_unlock(sc);
1654 
1655 		iwm_write_mem32(sc,
1656 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1657 		/* Set scheduler window size and frame limit. */
1658 		iwm_write_mem32(sc,
1659 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1660 		    sizeof(uint32_t),
1661 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1662 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1663 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1664 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1665 
1666 		if (!iwm_nic_lock(sc)) {
1667 			device_printf(sc->sc_dev,
1668 			    "%s: cannot enable txq %d\n", __func__, qid);
1669 			return EBUSY;
1670 		}
1671 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1672 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1673 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1674 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1675 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1676 
1677 		/* Enable the scheduler for this queue. */
1678 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
1679 	} else {
1680 		struct iwm_scd_txq_cfg_cmd cmd;
1681 		int error;
1682 
1683 		iwm_nic_unlock(sc);
1684 
1685 		memset(&cmd, 0, sizeof(cmd));
1686 		cmd.scd_queue = qid;
1687 		cmd.enable = 1;
1688 		cmd.sta_id = sta_id;
1689 		cmd.tx_fifo = fifo;
1690 		cmd.aggregate = 0;
1691 		cmd.window = IWM_FRAME_LIMIT;
1692 
1693 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1694 		    sizeof(cmd), &cmd);
1695 		if (error) {
1696 			device_printf(sc->sc_dev,
1697 			    "cannot enable txq %d\n", qid);
1698 			return error;
1699 		}
1700 
1701 		if (!iwm_nic_lock(sc))
1702 			return EBUSY;
1703 	}
1704 
1705 	iwm_nic_unlock(sc);
1706 
1707 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1708 	    __func__, qid, fifo);
1709 
1710 	return 0;
1711 }
1712 
1713 static int
iwm_trans_pcie_fw_alive(struct iwm_softc * sc,uint32_t scd_base_addr)1714 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1715 {
1716 	int error, chnl;
1717 
1718 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1719 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1720 
1721 	if (!iwm_nic_lock(sc))
1722 		return EBUSY;
1723 
1724 	iwm_ict_reset(sc);
1725 
1726 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1727 	if (scd_base_addr != 0 &&
1728 	    scd_base_addr != sc->scd_base_addr) {
1729 		device_printf(sc->sc_dev,
1730 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1731 		    __func__, sc->scd_base_addr, scd_base_addr);
1732 	}
1733 
1734 	iwm_nic_unlock(sc);
1735 
1736 	/* reset context data, TX status and translation data */
1737 	error = iwm_write_mem(sc,
1738 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1739 	    NULL, clear_dwords);
1740 	if (error)
1741 		return EBUSY;
1742 
1743 	if (!iwm_nic_lock(sc))
1744 		return EBUSY;
1745 
1746 	/* Set physical address of TX scheduler rings (1KB aligned). */
1747 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1748 
1749 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1750 
1751 	iwm_nic_unlock(sc);
1752 
1753 	/* enable command channel */
1754 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1755 	if (error)
1756 		return error;
1757 
1758 	if (!iwm_nic_lock(sc))
1759 		return EBUSY;
1760 
1761 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1762 
1763 	/* Enable DMA channels. */
1764 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1765 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1766 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1767 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1768 	}
1769 
1770 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1771 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1772 
1773 	iwm_nic_unlock(sc);
1774 
1775 	/* Enable L1-Active */
1776 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1777 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1778 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1779 	}
1780 
1781 	return error;
1782 }
1783 
1784 /*
1785  * NVM read access and content parsing.  We do not support
1786  * external NVM or writing NVM.
1787  * iwlwifi/mvm/nvm.c
1788  */
1789 
1790 /* Default NVM size to read */
1791 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1792 
1793 #define IWM_NVM_WRITE_OPCODE 1
1794 #define IWM_NVM_READ_OPCODE 0
1795 
1796 /* load nvm chunk response */
1797 enum {
1798 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1799 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1800 };
1801 
1802 static int
iwm_nvm_read_chunk(struct iwm_softc * sc,uint16_t section,uint16_t offset,uint16_t length,uint8_t * data,uint16_t * len)1803 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1804 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1805 {
1806 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1807 		.offset = htole16(offset),
1808 		.length = htole16(length),
1809 		.type = htole16(section),
1810 		.op_code = IWM_NVM_READ_OPCODE,
1811 	};
1812 	struct iwm_nvm_access_resp *nvm_resp;
1813 	struct iwm_rx_packet *pkt;
1814 	struct iwm_host_cmd cmd = {
1815 		.id = IWM_NVM_ACCESS_CMD,
1816 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1817 		.data = { &nvm_access_cmd, },
1818 	};
1819 	int ret, bytes_read, offset_read;
1820 	uint8_t *resp_data;
1821 
1822 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1823 
1824 	ret = iwm_send_cmd(sc, &cmd);
1825 	if (ret) {
1826 		device_printf(sc->sc_dev,
1827 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1828 		return ret;
1829 	}
1830 
1831 	pkt = cmd.resp_pkt;
1832 
1833 	/* Extract NVM response */
1834 	nvm_resp = (void *)pkt->data;
1835 	ret = le16toh(nvm_resp->status);
1836 	bytes_read = le16toh(nvm_resp->length);
1837 	offset_read = le16toh(nvm_resp->offset);
1838 	resp_data = nvm_resp->data;
1839 	if (ret) {
1840 		if ((offset != 0) &&
1841 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1842 			/*
1843 			 * meaning of NOT_VALID_ADDRESS:
1844 			 * driver try to read chunk from address that is
1845 			 * multiple of 2K and got an error since addr is empty.
1846 			 * meaning of (offset != 0): driver already
1847 			 * read valid data from another chunk so this case
1848 			 * is not an error.
1849 			 */
1850 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1851 				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1852 				    offset);
1853 			*len = 0;
1854 			ret = 0;
1855 		} else {
1856 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1857 				    "NVM access command failed with status %d\n", ret);
1858 			ret = EIO;
1859 		}
1860 		goto exit;
1861 	}
1862 
1863 	if (offset_read != offset) {
1864 		device_printf(sc->sc_dev,
1865 		    "NVM ACCESS response with invalid offset %d\n",
1866 		    offset_read);
1867 		ret = EINVAL;
1868 		goto exit;
1869 	}
1870 
1871 	if (bytes_read > length) {
1872 		device_printf(sc->sc_dev,
1873 		    "NVM ACCESS response with too much data "
1874 		    "(%d bytes requested, %d bytes received)\n",
1875 		    length, bytes_read);
1876 		ret = EINVAL;
1877 		goto exit;
1878 	}
1879 
1880 	/* Write data to NVM */
1881 	memcpy(data + offset, resp_data, bytes_read);
1882 	*len = bytes_read;
1883 
1884  exit:
1885 	iwm_free_resp(sc, &cmd);
1886 	return ret;
1887 }
1888 
1889 /*
1890  * Reads an NVM section completely.
1891  * NICs prior to 7000 family don't have a real NVM, but just read
1892  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1893  * by uCode, we need to manually check in this case that we don't
1894  * overflow and try to read more than the EEPROM size.
1895  * For 7000 family NICs, we supply the maximal size we can read, and
1896  * the uCode fills the response with as much data as we can,
1897  * without overflowing, so no check is needed.
1898  */
1899 static int
iwm_nvm_read_section(struct iwm_softc * sc,uint16_t section,uint8_t * data,uint16_t * len,uint32_t size_read)1900 iwm_nvm_read_section(struct iwm_softc *sc,
1901 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1902 {
1903 	uint16_t seglen, length, offset = 0;
1904 	int ret;
1905 
1906 	/* Set nvm section read length */
1907 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1908 
1909 	seglen = length;
1910 
1911 	/* Read the NVM until exhausted (reading less than requested) */
1912 	while (seglen == length) {
1913 		/* Check no memory assumptions fail and cause an overflow */
1914 		if ((size_read + offset + length) >
1915 		    sc->cfg->eeprom_size) {
1916 			device_printf(sc->sc_dev,
1917 			    "EEPROM size is too small for NVM\n");
1918 			return ENOBUFS;
1919 		}
1920 
1921 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1922 		if (ret) {
1923 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1924 				    "Cannot read NVM from section %d offset %d, length %d\n",
1925 				    section, offset, length);
1926 			return ret;
1927 		}
1928 		offset += seglen;
1929 	}
1930 
1931 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1932 		    "NVM section %d read completed\n", section);
1933 	*len = offset;
1934 	return 0;
1935 }
1936 
1937 /*
1938  * BEGIN IWM_NVM_PARSE
1939  */
1940 
1941 /* iwlwifi/iwl-nvm-parse.c */
1942 
1943 /*
1944  * Translate EEPROM flags to net80211.
1945  */
1946 static uint32_t
iwm_eeprom_channel_flags(uint16_t ch_flags)1947 iwm_eeprom_channel_flags(uint16_t ch_flags)
1948 {
1949 	uint32_t nflags;
1950 
1951 	nflags = 0;
1952 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1953 		nflags |= IEEE80211_CHAN_PASSIVE;
1954 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1955 		nflags |= IEEE80211_CHAN_NOADHOC;
1956 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1957 		nflags |= IEEE80211_CHAN_DFS;
1958 		/* Just in case. */
1959 		nflags |= IEEE80211_CHAN_NOADHOC;
1960 	}
1961 
1962 	return (nflags);
1963 }
1964 
1965 static void
iwm_add_channel_band(struct iwm_softc * sc,struct ieee80211_channel chans[],int maxchans,int * nchans,int ch_idx,size_t ch_num,const uint8_t bands[])1966 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1967     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1968     const uint8_t bands[])
1969 {
1970 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1971 	uint32_t nflags;
1972 	uint16_t ch_flags;
1973 	uint8_t ieee;
1974 	int error;
1975 
1976 	for (; ch_idx < ch_num; ch_idx++) {
1977 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1978 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1979 			ieee = iwm_nvm_channels[ch_idx];
1980 		else
1981 			ieee = iwm_nvm_channels_8000[ch_idx];
1982 
1983 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1984 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1985 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1986 			    ieee, ch_flags,
1987 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1988 			    "5.2" : "2.4");
1989 			continue;
1990 		}
1991 
1992 		nflags = iwm_eeprom_channel_flags(ch_flags);
1993 		error = ieee80211_add_channel(chans, maxchans, nchans,
1994 		    ieee, 0, 0, nflags, bands);
1995 		if (error != 0)
1996 			break;
1997 
1998 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1999 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2000 		    ieee, ch_flags,
2001 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2002 		    "5.2" : "2.4");
2003 	}
2004 }
2005 
2006 static void
iwm_init_channel_map(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])2007 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2008     struct ieee80211_channel chans[])
2009 {
2010 	struct iwm_softc *sc = ic->ic_softc;
2011 	struct iwm_nvm_data *data = sc->nvm_data;
2012 	uint8_t bands[IEEE80211_MODE_BYTES];
2013 	size_t ch_num;
2014 
2015 	memset(bands, 0, sizeof(bands));
2016 	/* 1-13: 11b/g channels. */
2017 	setbit(bands, IEEE80211_MODE_11B);
2018 	setbit(bands, IEEE80211_MODE_11G);
2019 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2020 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2021 
2022 	/* 14: 11b channel only. */
2023 	clrbit(bands, IEEE80211_MODE_11G);
2024 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2025 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2026 
2027 	if (data->sku_cap_band_52GHz_enable) {
2028 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2029 			ch_num = nitems(iwm_nvm_channels);
2030 		else
2031 			ch_num = nitems(iwm_nvm_channels_8000);
2032 		memset(bands, 0, sizeof(bands));
2033 		setbit(bands, IEEE80211_MODE_11A);
2034 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2035 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2036 	}
2037 }
2038 
2039 static void
iwm_set_hw_address_family_8000(struct iwm_softc * sc,struct iwm_nvm_data * data,const uint16_t * mac_override,const uint16_t * nvm_hw)2040 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2041 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2042 {
2043 	const uint8_t *hw_addr;
2044 
2045 	if (mac_override) {
2046 		static const uint8_t reserved_mac[] = {
2047 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2048 		};
2049 
2050 		hw_addr = (const uint8_t *)(mac_override +
2051 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2052 
2053 		/*
2054 		 * Store the MAC address from MAO section.
2055 		 * No byte swapping is required in MAO section
2056 		 */
2057 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2058 
2059 		/*
2060 		 * Force the use of the OTP MAC address in case of reserved MAC
2061 		 * address in the NVM, or if address is given but invalid.
2062 		 */
2063 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2064 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2065 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2066 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2067 			return;
2068 
2069 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2070 		    "%s: mac address from nvm override section invalid\n",
2071 		    __func__);
2072 	}
2073 
2074 	if (nvm_hw) {
2075 		/* read the mac address from WFMP registers */
2076 		uint32_t mac_addr0 =
2077 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2078 		uint32_t mac_addr1 =
2079 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2080 
2081 		hw_addr = (const uint8_t *)&mac_addr0;
2082 		data->hw_addr[0] = hw_addr[3];
2083 		data->hw_addr[1] = hw_addr[2];
2084 		data->hw_addr[2] = hw_addr[1];
2085 		data->hw_addr[3] = hw_addr[0];
2086 
2087 		hw_addr = (const uint8_t *)&mac_addr1;
2088 		data->hw_addr[4] = hw_addr[1];
2089 		data->hw_addr[5] = hw_addr[0];
2090 
2091 		return;
2092 	}
2093 
2094 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2095 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2096 }
2097 
2098 static int
iwm_get_sku(const struct iwm_softc * sc,const uint16_t * nvm_sw,const uint16_t * phy_sku)2099 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2100 	    const uint16_t *phy_sku)
2101 {
2102 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2103 		return le16_to_cpup(nvm_sw + IWM_SKU);
2104 
2105 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2106 }
2107 
2108 static int
iwm_get_nvm_version(const struct iwm_softc * sc,const uint16_t * nvm_sw)2109 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2110 {
2111 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2112 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2113 	else
2114 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2115 						IWM_NVM_VERSION_8000));
2116 }
2117 
2118 static int
iwm_get_radio_cfg(const struct iwm_softc * sc,const uint16_t * nvm_sw,const uint16_t * phy_sku)2119 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2120 		  const uint16_t *phy_sku)
2121 {
2122         if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2123                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2124 
2125         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2126 }
2127 
2128 static int
iwm_get_n_hw_addrs(const struct iwm_softc * sc,const uint16_t * nvm_sw)2129 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2130 {
2131 	int n_hw_addr;
2132 
2133 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2134 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2135 
2136 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2137 
2138         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2139 }
2140 
2141 static void
iwm_set_radio_cfg(const struct iwm_softc * sc,struct iwm_nvm_data * data,uint32_t radio_cfg)2142 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2143 		  uint32_t radio_cfg)
2144 {
2145 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2146 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2147 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2148 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2149 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2150 		return;
2151 	}
2152 
2153 	/* set the radio configuration for family 8000 */
2154 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2155 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2156 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2157 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2158 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2159 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2160 }
2161 
2162 static int
iwm_set_hw_address(struct iwm_softc * sc,struct iwm_nvm_data * data,const uint16_t * nvm_hw,const uint16_t * mac_override)2163 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2164 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2165 {
2166 #ifdef notyet /* for FAMILY 9000 */
2167 	if (cfg->mac_addr_from_csr) {
2168 		iwm_set_hw_address_from_csr(sc, data);
2169         } else
2170 #endif
2171 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2172 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2173 
2174 		/* The byte order is little endian 16 bit, meaning 214365 */
2175 		data->hw_addr[0] = hw_addr[1];
2176 		data->hw_addr[1] = hw_addr[0];
2177 		data->hw_addr[2] = hw_addr[3];
2178 		data->hw_addr[3] = hw_addr[2];
2179 		data->hw_addr[4] = hw_addr[5];
2180 		data->hw_addr[5] = hw_addr[4];
2181 	} else {
2182 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2183 	}
2184 
2185 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2186 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2187 		return EINVAL;
2188 	}
2189 
2190 	return 0;
2191 }
2192 
2193 static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc * sc,const uint16_t * nvm_hw,const uint16_t * nvm_sw,const uint16_t * nvm_calib,const uint16_t * mac_override,const uint16_t * phy_sku,const uint16_t * regulatory)2194 iwm_parse_nvm_data(struct iwm_softc *sc,
2195 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2196 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2197 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2198 {
2199 	struct iwm_nvm_data *data;
2200 	uint32_t sku, radio_cfg;
2201 	uint16_t lar_config;
2202 
2203 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2204 		data = malloc(sizeof(*data) +
2205 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2206 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2207 	} else {
2208 		data = malloc(sizeof(*data) +
2209 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2210 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2211 	}
2212 	if (!data)
2213 		return NULL;
2214 
2215 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2216 
2217 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2218 	iwm_set_radio_cfg(sc, data, radio_cfg);
2219 
2220 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2221 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2222 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2223 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2224 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2225 
2226 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2227 
2228 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2229 		/* TODO: use IWL_NVM_EXT */
2230 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2231 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2232 				       IWM_NVM_LAR_OFFSET_8000;
2233 
2234 		lar_config = le16_to_cpup(regulatory + lar_offset);
2235 		data->lar_enabled = !!(lar_config &
2236 				       IWM_NVM_LAR_ENABLED_8000);
2237 	}
2238 
2239 	/* If no valid mac address was found - bail out */
2240 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2241 		free(data, M_DEVBUF);
2242 		return NULL;
2243 	}
2244 
2245 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2246 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2247 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2248 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2249 	} else {
2250 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2251 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2252 	}
2253 
2254 	return data;
2255 }
2256 
2257 static void
iwm_free_nvm_data(struct iwm_nvm_data * data)2258 iwm_free_nvm_data(struct iwm_nvm_data *data)
2259 {
2260 	if (data != NULL)
2261 		free(data, M_DEVBUF);
2262 }
2263 
2264 static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc * sc,struct iwm_nvm_section * sections)2265 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2266 {
2267 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2268 
2269 	/* Checking for required sections */
2270 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2271 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2272 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2273 			device_printf(sc->sc_dev,
2274 			    "Can't parse empty OTP/NVM sections\n");
2275 			return NULL;
2276 		}
2277 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2278 		/* SW and REGULATORY sections are mandatory */
2279 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2280 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2281 			device_printf(sc->sc_dev,
2282 			    "Can't parse empty OTP/NVM sections\n");
2283 			return NULL;
2284 		}
2285 		/* MAC_OVERRIDE or at least HW section must exist */
2286 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2287 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2288 			device_printf(sc->sc_dev,
2289 			    "Can't parse mac_address, empty sections\n");
2290 			return NULL;
2291 		}
2292 
2293 		/* PHY_SKU section is mandatory in B0 */
2294 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2295 			device_printf(sc->sc_dev,
2296 			    "Can't parse phy_sku in B0, empty sections\n");
2297 			return NULL;
2298 		}
2299 	} else {
2300 		panic("unknown device family %d\n", sc->cfg->device_family);
2301 	}
2302 
2303 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2304 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2305 	calib = (const uint16_t *)
2306 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2307 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2308 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2309 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2310 	mac_override = (const uint16_t *)
2311 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2312 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2313 
2314 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2315 	    phy_sku, regulatory);
2316 }
2317 
2318 static int
iwm_nvm_init(struct iwm_softc * sc)2319 iwm_nvm_init(struct iwm_softc *sc)
2320 {
2321 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2322 	int i, ret, section;
2323 	uint32_t size_read = 0;
2324 	uint8_t *nvm_buffer, *temp;
2325 	uint16_t len;
2326 
2327 	memset(nvm_sections, 0, sizeof(nvm_sections));
2328 
2329 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
2330 		return EINVAL;
2331 
2332 	/* load NVM values from nic */
2333 	/* Read From FW NVM */
2334 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2335 
2336 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2337 	if (!nvm_buffer)
2338 		return ENOMEM;
2339 	for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
2340 		/* we override the constness for initial read */
2341 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2342 					   &len, size_read);
2343 		if (ret)
2344 			continue;
2345 		size_read += len;
2346 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2347 		if (!temp) {
2348 			ret = ENOMEM;
2349 			break;
2350 		}
2351 		memcpy(temp, nvm_buffer, len);
2352 
2353 		nvm_sections[section].data = temp;
2354 		nvm_sections[section].length = len;
2355 	}
2356 	if (!size_read)
2357 		device_printf(sc->sc_dev, "OTP is blank\n");
2358 	free(nvm_buffer, M_DEVBUF);
2359 
2360 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2361 	if (!sc->nvm_data)
2362 		return EINVAL;
2363 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2364 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2365 
2366 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2367 		if (nvm_sections[i].data != NULL)
2368 			free(nvm_sections[i].data, M_DEVBUF);
2369 	}
2370 
2371 	return 0;
2372 }
2373 
2374 static int
iwm_pcie_load_section(struct iwm_softc * sc,uint8_t section_num,const struct iwm_fw_desc * section)2375 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2376 	const struct iwm_fw_desc *section)
2377 {
2378 	struct iwm_dma_info *dma = &sc->fw_dma;
2379 	uint8_t *v_addr;
2380 	bus_addr_t p_addr;
2381 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2382 	int ret = 0;
2383 
2384 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2385 		    "%s: [%d] uCode section being loaded...\n",
2386 		    __func__, section_num);
2387 
2388 	v_addr = dma->vaddr;
2389 	p_addr = dma->paddr;
2390 
2391 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2392 		uint32_t copy_size, dst_addr;
2393 		int extended_addr = FALSE;
2394 
2395 		copy_size = MIN(chunk_sz, section->len - offset);
2396 		dst_addr = section->offset + offset;
2397 
2398 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2399 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2400 			extended_addr = TRUE;
2401 
2402 		if (extended_addr)
2403 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2404 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2405 
2406 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2407 		    copy_size);
2408 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2409 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2410 						   copy_size);
2411 
2412 		if (extended_addr)
2413 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2414 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2415 
2416 		if (ret) {
2417 			device_printf(sc->sc_dev,
2418 			    "%s: Could not load the [%d] uCode section\n",
2419 			    __func__, section_num);
2420 			break;
2421 		}
2422 	}
2423 
2424 	return ret;
2425 }
2426 
2427 /*
2428  * ucode
2429  */
2430 static int
iwm_pcie_load_firmware_chunk(struct iwm_softc * sc,uint32_t dst_addr,bus_addr_t phy_addr,uint32_t byte_cnt)2431 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2432 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2433 {
2434 	sc->sc_fw_chunk_done = 0;
2435 
2436 	if (!iwm_nic_lock(sc))
2437 		return EBUSY;
2438 
2439 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2440 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2441 
2442 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2443 	    dst_addr);
2444 
2445 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2446 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2447 
2448 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2449 	    (iwm_get_dma_hi_addr(phy_addr)
2450 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2451 
2452 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2453 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2454 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2455 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2456 
2457 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2458 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2459 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2460 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2461 
2462 	iwm_nic_unlock(sc);
2463 
2464 	/* wait up to 5s for this segment to load */
2465 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2466 
2467 	if (!sc->sc_fw_chunk_done) {
2468 		device_printf(sc->sc_dev,
2469 		    "fw chunk addr 0x%x len %d failed to load\n",
2470 		    dst_addr, byte_cnt);
2471 		return ETIMEDOUT;
2472 	}
2473 
2474 	return 0;
2475 }
2476 
2477 static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc * sc,const struct iwm_fw_img * image,int cpu,int * first_ucode_section)2478 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2479 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2480 {
2481 	int shift_param;
2482 	int i, ret = 0, sec_num = 0x1;
2483 	uint32_t val, last_read_idx = 0;
2484 
2485 	if (cpu == 1) {
2486 		shift_param = 0;
2487 		*first_ucode_section = 0;
2488 	} else {
2489 		shift_param = 16;
2490 		(*first_ucode_section)++;
2491 	}
2492 
2493 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2494 		last_read_idx = i;
2495 
2496 		/*
2497 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2498 		 * CPU1 to CPU2.
2499 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2500 		 * CPU2 non paged to CPU2 paging sec.
2501 		 */
2502 		if (!image->sec[i].data ||
2503 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2504 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2505 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2506 				    "Break since Data not valid or Empty section, sec = %d\n",
2507 				    i);
2508 			break;
2509 		}
2510 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2511 		if (ret)
2512 			return ret;
2513 
2514 		/* Notify the ucode of the loaded section number and status */
2515 		if (iwm_nic_lock(sc)) {
2516 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2517 			val = val | (sec_num << shift_param);
2518 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2519 			sec_num = (sec_num << 1) | 0x1;
2520 			iwm_nic_unlock(sc);
2521 		}
2522 	}
2523 
2524 	*first_ucode_section = last_read_idx;
2525 
2526 	iwm_enable_interrupts(sc);
2527 
2528 	if (iwm_nic_lock(sc)) {
2529 		if (cpu == 1)
2530 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2531 		else
2532 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2533 		iwm_nic_unlock(sc);
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 static int
iwm_pcie_load_cpu_sections(struct iwm_softc * sc,const struct iwm_fw_img * image,int cpu,int * first_ucode_section)2540 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2541 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2542 {
2543 	int i, ret = 0;
2544 	uint32_t last_read_idx = 0;
2545 
2546 	if (cpu == 1) {
2547 		*first_ucode_section = 0;
2548 	} else {
2549 		(*first_ucode_section)++;
2550 	}
2551 
2552 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2553 		last_read_idx = i;
2554 
2555 		/*
2556 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2557 		 * CPU1 to CPU2.
2558 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2559 		 * CPU2 non paged to CPU2 paging sec.
2560 		 */
2561 		if (!image->sec[i].data ||
2562 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2563 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2564 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2565 				    "Break since Data not valid or Empty section, sec = %d\n",
2566 				     i);
2567 			break;
2568 		}
2569 
2570 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2571 		if (ret)
2572 			return ret;
2573 	}
2574 
2575 	*first_ucode_section = last_read_idx;
2576 
2577 	return 0;
2578 
2579 }
2580 
2581 static int
iwm_pcie_load_given_ucode(struct iwm_softc * sc,const struct iwm_fw_img * image)2582 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2583 {
2584 	int ret = 0;
2585 	int first_ucode_section;
2586 
2587 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2588 		     image->is_dual_cpus ? "Dual" : "Single");
2589 
2590 	/* load to FW the binary non secured sections of CPU1 */
2591 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2592 	if (ret)
2593 		return ret;
2594 
2595 	if (image->is_dual_cpus) {
2596 		/* set CPU2 header address */
2597 		if (iwm_nic_lock(sc)) {
2598 			iwm_write_prph(sc,
2599 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2600 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2601 			iwm_nic_unlock(sc);
2602 		}
2603 
2604 		/* load to FW the binary sections of CPU2 */
2605 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2606 						 &first_ucode_section);
2607 		if (ret)
2608 			return ret;
2609 	}
2610 
2611 	iwm_enable_interrupts(sc);
2612 
2613 	/* release CPU reset */
2614 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2615 
2616 	return 0;
2617 }
2618 
2619 int
iwm_pcie_load_given_ucode_8000(struct iwm_softc * sc,const struct iwm_fw_img * image)2620 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2621 	const struct iwm_fw_img *image)
2622 {
2623 	int ret = 0;
2624 	int first_ucode_section;
2625 
2626 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2627 		    image->is_dual_cpus ? "Dual" : "Single");
2628 
2629 	/* configure the ucode to be ready to get the secured image */
2630 	/* release CPU reset */
2631 	if (iwm_nic_lock(sc)) {
2632 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2633 		    IWM_RELEASE_CPU_RESET_BIT);
2634 		iwm_nic_unlock(sc);
2635 	}
2636 
2637 	/* load to FW the binary Secured sections of CPU1 */
2638 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2639 	    &first_ucode_section);
2640 	if (ret)
2641 		return ret;
2642 
2643 	/* load to FW the binary sections of CPU2 */
2644 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2645 	    &first_ucode_section);
2646 }
2647 
2648 /* XXX Get rid of this definition */
2649 static inline void
iwm_enable_fw_load_int(struct iwm_softc * sc)2650 iwm_enable_fw_load_int(struct iwm_softc *sc)
2651 {
2652 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2653 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2654 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2655 }
2656 
2657 /* XXX Add proper rfkill support code */
2658 static int
iwm_start_fw(struct iwm_softc * sc,const struct iwm_fw_img * fw)2659 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2660 {
2661 	int ret;
2662 
2663 	/* This may fail if AMT took ownership of the device */
2664 	if (iwm_prepare_card_hw(sc)) {
2665 		device_printf(sc->sc_dev,
2666 		    "%s: Exit HW not ready\n", __func__);
2667 		ret = EIO;
2668 		goto out;
2669 	}
2670 
2671 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2672 
2673 	iwm_disable_interrupts(sc);
2674 
2675 	/* make sure rfkill handshake bits are cleared */
2676 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2677 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2678 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2679 
2680 	/* clear (again), then enable host interrupts */
2681 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2682 
2683 	ret = iwm_nic_init(sc);
2684 	if (ret) {
2685 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2686 		goto out;
2687 	}
2688 
2689 	/*
2690 	 * Now, we load the firmware and don't want to be interrupted, even
2691 	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2692 	 * FH_TX interrupt which is needed to load the firmware). If the
2693 	 * RF-Kill switch is toggled, we will find out after having loaded
2694 	 * the firmware and return the proper value to the caller.
2695 	 */
2696 	iwm_enable_fw_load_int(sc);
2697 
2698 	/* really make sure rfkill handshake bits are cleared */
2699 	/* maybe we should write a few times more?  just to make sure */
2700 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2701 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702 
2703 	/* Load the given image to the HW */
2704 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2705 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2706 	else
2707 		ret = iwm_pcie_load_given_ucode(sc, fw);
2708 
2709 	/* XXX re-check RF-Kill state */
2710 
2711 out:
2712 	return ret;
2713 }
2714 
2715 static int
iwm_send_tx_ant_cfg(struct iwm_softc * sc,uint8_t valid_tx_ant)2716 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2717 {
2718 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2719 		.valid = htole32(valid_tx_ant),
2720 	};
2721 
2722 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2723 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2724 }
2725 
2726 /* iwlwifi: mvm/fw.c */
2727 static int
iwm_send_phy_cfg_cmd(struct iwm_softc * sc)2728 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2729 {
2730 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2731 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2732 
2733 	/* Set parameters */
2734 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2735 	phy_cfg_cmd.calib_control.event_trigger =
2736 	    sc->sc_default_calib[ucode_type].event_trigger;
2737 	phy_cfg_cmd.calib_control.flow_trigger =
2738 	    sc->sc_default_calib[ucode_type].flow_trigger;
2739 
2740 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2741 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2742 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2743 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2744 }
2745 
2746 static int
iwm_alive_fn(struct iwm_softc * sc,struct iwm_rx_packet * pkt,void * data)2747 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2748 {
2749 	struct iwm_alive_data *alive_data = data;
2750 	struct iwm_alive_resp_v3 *palive3;
2751 	struct iwm_alive_resp *palive;
2752 	struct iwm_umac_alive *umac;
2753 	struct iwm_lmac_alive *lmac1;
2754 	struct iwm_lmac_alive *lmac2 = NULL;
2755 	uint16_t status;
2756 
2757 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2758 		palive = (void *)pkt->data;
2759 		umac = &palive->umac_data;
2760 		lmac1 = &palive->lmac_data[0];
2761 		lmac2 = &palive->lmac_data[1];
2762 		status = le16toh(palive->status);
2763 	} else {
2764 		palive3 = (void *)pkt->data;
2765 		umac = &palive3->umac_data;
2766 		lmac1 = &palive3->lmac_data;
2767 		status = le16toh(palive3->status);
2768 	}
2769 
2770 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2771 	if (lmac2)
2772 		sc->error_event_table[1] =
2773 			le32toh(lmac2->error_event_table_ptr);
2774 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2775 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2776 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2777 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2778 	if (sc->umac_error_event_table)
2779 		sc->support_umac_log = TRUE;
2780 
2781 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2782 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2783 		    status, lmac1->ver_type, lmac1->ver_subtype);
2784 
2785 	if (lmac2)
2786 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2787 
2788 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2789 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2790 		    le32toh(umac->umac_major),
2791 		    le32toh(umac->umac_minor));
2792 
2793 	return TRUE;
2794 }
2795 
2796 static int
iwm_wait_phy_db_entry(struct iwm_softc * sc,struct iwm_rx_packet * pkt,void * data)2797 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2798 	struct iwm_rx_packet *pkt, void *data)
2799 {
2800 	struct iwm_phy_db *phy_db = data;
2801 
2802 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2803 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2804 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2805 			    __func__, pkt->hdr.code);
2806 		}
2807 		return TRUE;
2808 	}
2809 
2810 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2811 		device_printf(sc->sc_dev,
2812 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2813 	}
2814 
2815 	return FALSE;
2816 }
2817 
2818 static int
iwm_load_ucode_wait_alive(struct iwm_softc * sc,enum iwm_ucode_type ucode_type)2819 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2820 	enum iwm_ucode_type ucode_type)
2821 {
2822 	struct iwm_notification_wait alive_wait;
2823 	struct iwm_alive_data alive_data;
2824 	const struct iwm_fw_img *fw;
2825 	enum iwm_ucode_type old_type = sc->cur_ucode;
2826 	int error;
2827 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2828 
2829 	fw = &sc->sc_fw.img[ucode_type];
2830 	sc->cur_ucode = ucode_type;
2831 	sc->ucode_loaded = FALSE;
2832 
2833 	memset(&alive_data, 0, sizeof(alive_data));
2834 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2835 				   alive_cmd, nitems(alive_cmd),
2836 				   iwm_alive_fn, &alive_data);
2837 
2838 	error = iwm_start_fw(sc, fw);
2839 	if (error) {
2840 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2841 		sc->cur_ucode = old_type;
2842 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2843 		return error;
2844 	}
2845 
2846 	/*
2847 	 * Some things may run in the background now, but we
2848 	 * just wait for the ALIVE notification here.
2849 	 */
2850 	IWM_UNLOCK(sc);
2851 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2852 				      IWM_UCODE_ALIVE_TIMEOUT);
2853 	IWM_LOCK(sc);
2854 	if (error) {
2855 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2856 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2857 			if (iwm_nic_lock(sc)) {
2858 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2859 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2860 				iwm_nic_unlock(sc);
2861 			}
2862 			device_printf(sc->sc_dev,
2863 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2864 			    a, b);
2865 		}
2866 		sc->cur_ucode = old_type;
2867 		return error;
2868 	}
2869 
2870 	if (!alive_data.valid) {
2871 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2872 		    __func__);
2873 		sc->cur_ucode = old_type;
2874 		return EIO;
2875 	}
2876 
2877 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2878 
2879 	/*
2880 	 * configure and operate fw paging mechanism.
2881 	 * driver configures the paging flow only once, CPU2 paging image
2882 	 * included in the IWM_UCODE_INIT image.
2883 	 */
2884 	if (fw->paging_mem_size) {
2885 		error = iwm_save_fw_paging(sc, fw);
2886 		if (error) {
2887 			device_printf(sc->sc_dev,
2888 			    "%s: failed to save the FW paging image\n",
2889 			    __func__);
2890 			return error;
2891 		}
2892 
2893 		error = iwm_send_paging_cmd(sc, fw);
2894 		if (error) {
2895 			device_printf(sc->sc_dev,
2896 			    "%s: failed to send the paging cmd\n", __func__);
2897 			iwm_free_fw_paging(sc);
2898 			return error;
2899 		}
2900 	}
2901 
2902 	if (!error)
2903 		sc->ucode_loaded = TRUE;
2904 	return error;
2905 }
2906 
2907 /*
2908  * mvm misc bits
2909  */
2910 
2911 /*
2912  * follows iwlwifi/fw.c
2913  */
2914 static int
iwm_run_init_ucode(struct iwm_softc * sc,int justnvm)2915 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2916 {
2917 	struct iwm_notification_wait calib_wait;
2918 	static const uint16_t init_complete[] = {
2919 		IWM_INIT_COMPLETE_NOTIF,
2920 		IWM_CALIB_RES_NOTIF_PHY_DB
2921 	};
2922 	int ret;
2923 
2924 	/* do not operate with rfkill switch turned on */
2925 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2926 		device_printf(sc->sc_dev,
2927 		    "radio is disabled by hardware switch\n");
2928 		return EPERM;
2929 	}
2930 
2931 	iwm_init_notification_wait(sc->sc_notif_wait,
2932 				   &calib_wait,
2933 				   init_complete,
2934 				   nitems(init_complete),
2935 				   iwm_wait_phy_db_entry,
2936 				   sc->sc_phy_db);
2937 
2938 	/* Will also start the device */
2939 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2940 	if (ret) {
2941 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2942 		    ret);
2943 		goto error;
2944 	}
2945 
2946 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2947 		ret = iwm_send_bt_init_conf(sc);
2948 		if (ret) {
2949 			device_printf(sc->sc_dev,
2950 			    "failed to send bt coex configuration: %d\n", ret);
2951 			goto error;
2952 		}
2953 	}
2954 
2955 	if (justnvm) {
2956 		/* Read nvm */
2957 		ret = iwm_nvm_init(sc);
2958 		if (ret) {
2959 			device_printf(sc->sc_dev, "failed to read nvm\n");
2960 			goto error;
2961 		}
2962 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2963 		goto error;
2964 	}
2965 
2966 	/* Send TX valid antennas before triggering calibrations */
2967 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
2968 	if (ret) {
2969 		device_printf(sc->sc_dev,
2970 		    "failed to send antennas before calibration: %d\n", ret);
2971 		goto error;
2972 	}
2973 
2974 	/*
2975 	 * Send phy configurations command to init uCode
2976 	 * to start the 16.0 uCode init image internal calibrations.
2977 	 */
2978 	ret = iwm_send_phy_cfg_cmd(sc);
2979 	if (ret) {
2980 		device_printf(sc->sc_dev,
2981 		    "%s: Failed to run INIT calibrations: %d\n",
2982 		    __func__, ret);
2983 		goto error;
2984 	}
2985 
2986 	/*
2987 	 * Nothing to do but wait for the init complete notification
2988 	 * from the firmware.
2989 	 */
2990 	IWM_UNLOCK(sc);
2991 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2992 	    IWM_UCODE_CALIB_TIMEOUT);
2993 	IWM_LOCK(sc);
2994 
2995 
2996 	goto out;
2997 
2998 error:
2999 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3000 out:
3001 	return ret;
3002 }
3003 
3004 static int
iwm_config_ltr(struct iwm_softc * sc)3005 iwm_config_ltr(struct iwm_softc *sc)
3006 {
3007 	struct iwm_ltr_config_cmd cmd = {
3008 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3009 	};
3010 
3011 	if (!sc->sc_ltr_enabled)
3012 		return 0;
3013 
3014 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3015 }
3016 
3017 /*
3018  * receive side
3019  */
3020 
3021 /* (re)stock rx ring, called at init-time and at runtime */
3022 static int
iwm_rx_addbuf(struct iwm_softc * sc,int size,int idx)3023 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3024 {
3025 	struct iwm_rx_ring *ring = &sc->rxq;
3026 	struct iwm_rx_data *data = &ring->data[idx];
3027 	struct mbuf *m;
3028 	bus_dmamap_t dmamap;
3029 	bus_dma_segment_t seg;
3030 	int nsegs, error;
3031 
3032 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3033 	if (m == NULL)
3034 		return ENOBUFS;
3035 
3036 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3037 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3038 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3039 	if (error != 0) {
3040 		device_printf(sc->sc_dev,
3041 		    "%s: can't map mbuf, error %d\n", __func__, error);
3042 		m_freem(m);
3043 		return error;
3044 	}
3045 
3046 	if (data->m != NULL)
3047 		bus_dmamap_unload(ring->data_dmat, data->map);
3048 
3049 	/* Swap ring->spare_map with data->map */
3050 	dmamap = data->map;
3051 	data->map = ring->spare_map;
3052 	ring->spare_map = dmamap;
3053 
3054 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3055 	data->m = m;
3056 
3057 	/* Update RX descriptor. */
3058 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3059 	if (sc->cfg->mqrx_supported)
3060 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3061 	else
3062 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3063 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3064 	    BUS_DMASYNC_PREWRITE);
3065 
3066 	return 0;
3067 }
3068 
3069 static void
iwm_rx_rx_phy_cmd(struct iwm_softc * sc,struct iwm_rx_packet * pkt)3070 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3071 {
3072 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3073 
3074 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3075 
3076 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3077 }
3078 
3079 /*
3080  * Retrieve the average noise (in dBm) among receivers.
3081  */
3082 static int
iwm_get_noise(struct iwm_softc * sc,const struct iwm_statistics_rx_non_phy * stats)3083 iwm_get_noise(struct iwm_softc *sc,
3084     const struct iwm_statistics_rx_non_phy *stats)
3085 {
3086 	int i, noise;
3087 #ifdef IWM_DEBUG
3088 	int nbant, total;
3089 #else
3090 	int nbant __unused, total __unused;
3091 #endif
3092 
3093 	total = nbant = noise = 0;
3094 	for (i = 0; i < 3; i++) {
3095 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3096 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3097 		    __func__,
3098 		    i,
3099 		    noise);
3100 
3101 		if (noise) {
3102 			total += noise;
3103 			nbant++;
3104 		}
3105 	}
3106 
3107 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3108 	    __func__, nbant, total);
3109 #if 0
3110 	/* There should be at least one antenna but check anyway. */
3111 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3112 #else
3113 	/* For now, just hard-code it to -96 to be safe */
3114 	return (-96);
3115 #endif
3116 }
3117 
3118 static void
iwm_handle_rx_statistics(struct iwm_softc * sc,struct iwm_rx_packet * pkt)3119 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3120 {
3121 	struct iwm_notif_statistics *stats = (void *)&pkt->data;
3122 
3123 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3124 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3125 }
3126 
3127 /* iwlwifi: mvm/rx.c */
3128 /*
3129  * iwm_get_signal_strength - use new rx PHY INFO API
3130  * values are reported by the fw as positive values - need to negate
3131  * to obtain their dBM.  Account for missing antennas by replacing 0
3132  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3133  */
3134 static int
iwm_rx_get_signal_strength(struct iwm_softc * sc,struct iwm_rx_phy_info * phy_info)3135 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3136     struct iwm_rx_phy_info *phy_info)
3137 {
3138 	int energy_a, energy_b, energy_c, max_energy;
3139 	uint32_t val;
3140 
3141 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3142 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3143 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3144 	energy_a = energy_a ? -energy_a : -256;
3145 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3146 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3147 	energy_b = energy_b ? -energy_b : -256;
3148 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3149 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3150 	energy_c = energy_c ? -energy_c : -256;
3151 	max_energy = MAX(energy_a, energy_b);
3152 	max_energy = MAX(max_energy, energy_c);
3153 
3154 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3155 	    "energy In A %d B %d C %d , and max %d\n",
3156 	    energy_a, energy_b, energy_c, max_energy);
3157 
3158 	return max_energy;
3159 }
3160 
3161 static int
iwm_rxmq_get_signal_strength(struct iwm_softc * sc,struct iwm_rx_mpdu_desc * desc)3162 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3163     struct iwm_rx_mpdu_desc *desc)
3164 {
3165 	int energy_a, energy_b;
3166 
3167 	energy_a = desc->v1.energy_a;
3168 	energy_b = desc->v1.energy_b;
3169 	energy_a = energy_a ? -energy_a : -256;
3170 	energy_b = energy_b ? -energy_b : -256;
3171 	return MAX(energy_a, energy_b);
3172 }
3173 
3174 /*
3175  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3176  *
3177  * Handles the actual data of the Rx packet from the fw
3178  */
3179 static bool
iwm_rx_rx_mpdu(struct iwm_softc * sc,struct mbuf * m,uint32_t offset,bool stolen)3180 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3181     bool stolen)
3182 {
3183 	struct ieee80211com *ic = &sc->sc_ic;
3184 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3185 	struct ieee80211_rx_stats rxs;
3186 	struct iwm_rx_phy_info *phy_info;
3187 	struct iwm_rx_mpdu_res_start *rx_res;
3188 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3189 	uint32_t len;
3190 	uint32_t rx_pkt_status;
3191 	int rssi;
3192 
3193 	phy_info = &sc->sc_last_phy_info;
3194 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3195 	len = le16toh(rx_res->byte_count);
3196 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3197 
3198 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3199 		device_printf(sc->sc_dev,
3200 		    "dsp size out of range [0,20]: %d\n",
3201 		    phy_info->cfg_phy_cnt);
3202 		return false;
3203 	}
3204 
3205 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3206 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3207 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3208 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3209 		return false;
3210 	}
3211 
3212 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3213 
3214 	/* Map it to relative value */
3215 	rssi = rssi - sc->sc_noise;
3216 
3217 	/* replenish ring for the buffer we're going to feed to the sharks */
3218 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3219 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3220 		    __func__);
3221 		return false;
3222 	}
3223 
3224 	m->m_data = pkt->data + sizeof(*rx_res);
3225 	m->m_pkthdr.len = m->m_len = len;
3226 
3227 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3228 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3229 
3230 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3231 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3232 	    __func__,
3233 	    le16toh(phy_info->channel),
3234 	    le16toh(phy_info->phy_flags));
3235 
3236 	/*
3237 	 * Populate an RX state struct with the provided information.
3238 	 */
3239 	bzero(&rxs, sizeof(rxs));
3240 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3241 	rxs.r_flags |= IEEE80211_R_BAND;
3242 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3243 	rxs.c_ieee = le16toh(phy_info->channel);
3244 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3245 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3246 		rxs.c_band = IEEE80211_CHAN_2GHZ;
3247 	} else {
3248 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3249 		rxs.c_band = IEEE80211_CHAN_5GHZ;
3250 	}
3251 
3252 	/* rssi is in 1/2db units */
3253 	rxs.c_rssi = rssi * 2;
3254 	rxs.c_nf = sc->sc_noise;
3255 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3256 		return false;
3257 
3258 	if (ieee80211_radiotap_active_vap(vap)) {
3259 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3260 
3261 		tap->wr_flags = 0;
3262 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3263 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3264 		tap->wr_chan_freq = htole16(rxs.c_freq);
3265 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3266 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3267 		tap->wr_dbm_antsignal = (int8_t)rssi;
3268 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3269 		tap->wr_tsft = phy_info->system_timestamp;
3270 		switch (phy_info->rate) {
3271 		/* CCK rates. */
3272 		case  10: tap->wr_rate =   2; break;
3273 		case  20: tap->wr_rate =   4; break;
3274 		case  55: tap->wr_rate =  11; break;
3275 		case 110: tap->wr_rate =  22; break;
3276 		/* OFDM rates. */
3277 		case 0xd: tap->wr_rate =  12; break;
3278 		case 0xf: tap->wr_rate =  18; break;
3279 		case 0x5: tap->wr_rate =  24; break;
3280 		case 0x7: tap->wr_rate =  36; break;
3281 		case 0x9: tap->wr_rate =  48; break;
3282 		case 0xb: tap->wr_rate =  72; break;
3283 		case 0x1: tap->wr_rate =  96; break;
3284 		case 0x3: tap->wr_rate = 108; break;
3285 		/* Unknown rate: should not happen. */
3286 		default:  tap->wr_rate =   0;
3287 		}
3288 	}
3289 
3290 	return true;
3291 }
3292 
3293 static bool
iwm_rx_mpdu_mq(struct iwm_softc * sc,struct mbuf * m,uint32_t offset,bool stolen)3294 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3295     bool stolen)
3296 {
3297 	struct ieee80211com *ic = &sc->sc_ic;
3298 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3299 	struct ieee80211_frame *wh;
3300 	struct ieee80211_rx_stats rxs;
3301 	struct iwm_rx_mpdu_desc *desc;
3302 	struct iwm_rx_packet *pkt;
3303 	int rssi;
3304 	uint32_t hdrlen, len, rate_n_flags;
3305 	uint16_t phy_info;
3306 	uint8_t channel;
3307 
3308 	pkt = mtodo(m, offset);
3309 	desc = (void *)pkt->data;
3310 
3311 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3312 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3313 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3314 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3315 		return false;
3316 	}
3317 
3318 	channel = desc->v1.channel;
3319 	len = le16toh(desc->mpdu_len);
3320 	phy_info = le16toh(desc->phy_info);
3321 	rate_n_flags = desc->v1.rate_n_flags;
3322 
3323 	wh = mtodo(m, sizeof(*desc));
3324 	m->m_data = pkt->data + sizeof(*desc);
3325 	m->m_pkthdr.len = m->m_len = len;
3326 	m->m_len = len;
3327 
3328 	/* Account for padding following the frame header. */
3329 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3330 		hdrlen = ieee80211_anyhdrsize(wh);
3331 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3332 		m->m_data = mtodo(m, 2);
3333 		wh = mtod(m, struct ieee80211_frame *);
3334 	}
3335 
3336 	/* Map it to relative value */
3337 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3338 	rssi = rssi - sc->sc_noise;
3339 
3340 	/* replenish ring for the buffer we're going to feed to the sharks */
3341 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3342 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3343 		    __func__);
3344 		return false;
3345 	}
3346 
3347 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3348 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3349 
3350 	/*
3351 	 * Populate an RX state struct with the provided information.
3352 	 */
3353 	bzero(&rxs, sizeof(rxs));
3354 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3355 	rxs.r_flags |= IEEE80211_R_BAND;
3356 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3357 	rxs.c_ieee = channel;
3358 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3359 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3360 	rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3361 
3362 	/* rssi is in 1/2db units */
3363 	rxs.c_rssi = rssi * 2;
3364 	rxs.c_nf = sc->sc_noise;
3365 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3366 		return false;
3367 
3368 	if (ieee80211_radiotap_active_vap(vap)) {
3369 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3370 
3371 		tap->wr_flags = 0;
3372 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3373 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3374 		tap->wr_chan_freq = htole16(rxs.c_freq);
3375 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3376 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3377 		tap->wr_dbm_antsignal = (int8_t)rssi;
3378 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3379 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3380 		switch ((rate_n_flags & 0xff)) {
3381 		/* CCK rates. */
3382 		case  10: tap->wr_rate =   2; break;
3383 		case  20: tap->wr_rate =   4; break;
3384 		case  55: tap->wr_rate =  11; break;
3385 		case 110: tap->wr_rate =  22; break;
3386 		/* OFDM rates. */
3387 		case 0xd: tap->wr_rate =  12; break;
3388 		case 0xf: tap->wr_rate =  18; break;
3389 		case 0x5: tap->wr_rate =  24; break;
3390 		case 0x7: tap->wr_rate =  36; break;
3391 		case 0x9: tap->wr_rate =  48; break;
3392 		case 0xb: tap->wr_rate =  72; break;
3393 		case 0x1: tap->wr_rate =  96; break;
3394 		case 0x3: tap->wr_rate = 108; break;
3395 		/* Unknown rate: should not happen. */
3396 		default:  tap->wr_rate =   0;
3397 		}
3398 	}
3399 
3400 	return true;
3401 }
3402 
3403 static bool
iwm_rx_mpdu(struct iwm_softc * sc,struct mbuf * m,uint32_t offset,bool stolen)3404 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3405     bool stolen)
3406 {
3407 	struct ieee80211com *ic;
3408 	struct ieee80211_frame *wh;
3409 	struct ieee80211_node *ni;
3410 	bool ret;
3411 
3412 	ic = &sc->sc_ic;
3413 
3414 	ret = sc->cfg->mqrx_supported ?
3415 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3416 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3417 	if (!ret) {
3418 		counter_u64_add(ic->ic_ierrors, 1);
3419 		return (ret);
3420 	}
3421 
3422 	wh = mtod(m, struct ieee80211_frame *);
3423 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3424 
3425 	IWM_UNLOCK(sc);
3426 	if (ni != NULL) {
3427 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3428 		ieee80211_input_mimo(ni, m);
3429 		ieee80211_free_node(ni);
3430 	} else {
3431 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3432 		ieee80211_input_mimo_all(ic, m);
3433 	}
3434 	IWM_LOCK(sc);
3435 
3436 	return true;
3437 }
3438 
3439 static int
iwm_rx_tx_cmd_single(struct iwm_softc * sc,struct iwm_rx_packet * pkt,struct iwm_node * in)3440 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3441 	struct iwm_node *in)
3442 {
3443 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3444 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3445 	struct ieee80211_node *ni = &in->in_ni;
3446 	struct ieee80211vap *vap = ni->ni_vap;
3447 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3448 	int new_rate, cur_rate;
3449 	boolean_t rate_matched;
3450 	uint8_t tx_resp_rate;
3451 
3452 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3453 
3454 	/* Update rate control statistics. */
3455 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3456 	    __func__,
3457 	    (int) le16toh(tx_resp->status.status),
3458 	    (int) le16toh(tx_resp->status.sequence),
3459 	    tx_resp->frame_count,
3460 	    tx_resp->bt_kill_count,
3461 	    tx_resp->failure_rts,
3462 	    tx_resp->failure_frame,
3463 	    le32toh(tx_resp->initial_rate),
3464 	    (int) le16toh(tx_resp->wireless_media_time));
3465 
3466 	cur_rate = ieee80211_node_get_txrate_dot11rate(vap->iv_bss);
3467 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3468 
3469 	/* For rate control, ignore frames sent at different initial rate */
3470 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3471 
3472 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3473 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3474 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3475 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3476 	}
3477 
3478 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3479 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3480 	txs->short_retries = tx_resp->failure_rts;
3481 	txs->long_retries = tx_resp->failure_frame;
3482 	if (status != IWM_TX_STATUS_SUCCESS &&
3483 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3484 		switch (status) {
3485 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3486 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3487 			break;
3488 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3489 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3490 			break;
3491 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3492 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3493 			break;
3494 		default:
3495 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3496 			break;
3497 		}
3498 	} else {
3499 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3500 	}
3501 
3502 	if (rate_matched) {
3503 		ieee80211_ratectl_tx_complete(ni, txs);
3504 
3505 		ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3506 		new_rate = ieee80211_node_get_txrate_dot11rate(vap->iv_bss);
3507 		if (new_rate != 0 && new_rate != cur_rate) {
3508 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3509 			iwm_setrates(sc, in, new_rate);
3510 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3511 		}
3512  	}
3513 
3514 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3515 }
3516 
3517 static void
iwm_rx_tx_cmd(struct iwm_softc * sc,struct iwm_rx_packet * pkt)3518 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3519 {
3520 	struct iwm_cmd_header *cmd_hdr;
3521 	struct iwm_tx_ring *ring;
3522 	struct iwm_tx_data *txd;
3523 	struct iwm_node *in;
3524 	struct mbuf *m;
3525 	int idx, qid, qmsk, status;
3526 
3527 	cmd_hdr = &pkt->hdr;
3528 	idx = cmd_hdr->idx;
3529 	qid = cmd_hdr->qid;
3530 
3531 	ring = &sc->txq[qid];
3532 	txd = &ring->data[idx];
3533 	in = txd->in;
3534 	m = txd->m;
3535 
3536 	KASSERT(txd->done == 0, ("txd not done"));
3537 	KASSERT(txd->in != NULL, ("txd without node"));
3538 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3539 
3540 	sc->sc_tx_timer = 0;
3541 
3542 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3543 
3544 	/* Unmap and free mbuf. */
3545 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3546 	bus_dmamap_unload(ring->data_dmat, txd->map);
3547 
3548 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3549 	    "free txd %p, in %p\n", txd, txd->in);
3550 	txd->done = 1;
3551 	txd->m = NULL;
3552 	txd->in = NULL;
3553 
3554 	ieee80211_tx_complete(&in->in_ni, m, status);
3555 
3556 	qmsk = 1 << qid;
3557 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3558 		sc->qfullmsk &= ~qmsk;
3559 		if (sc->qfullmsk == 0)
3560 			iwm_start(sc);
3561 	}
3562 }
3563 
3564 /*
3565  * transmit side
3566  */
3567 
3568 /*
3569  * Process a "command done" firmware notification.  This is where we wakeup
3570  * processes waiting for a synchronous command completion.
3571  * from if_iwn
3572  */
3573 static void
iwm_cmd_done(struct iwm_softc * sc,struct iwm_rx_packet * pkt)3574 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3575 {
3576 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3577 	struct iwm_tx_data *data;
3578 
3579 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3580 		return;	/* Not a command ack. */
3581 	}
3582 
3583 	/* XXX wide commands? */
3584 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3585 	    "cmd notification type 0x%x qid %d idx %d\n",
3586 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3587 
3588 	data = &ring->data[pkt->hdr.idx];
3589 
3590 	/* If the command was mapped in an mbuf, free it. */
3591 	if (data->m != NULL) {
3592 		bus_dmamap_sync(ring->data_dmat, data->map,
3593 		    BUS_DMASYNC_POSTWRITE);
3594 		bus_dmamap_unload(ring->data_dmat, data->map);
3595 		m_freem(data->m);
3596 		data->m = NULL;
3597 	}
3598 	wakeup(&ring->desc[pkt->hdr.idx]);
3599 
3600 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3601 		device_printf(sc->sc_dev,
3602 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3603 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3604 		/* XXX call iwm_force_nmi() */
3605 	}
3606 
3607 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3608 	ring->queued--;
3609 	if (ring->queued == 0)
3610 		iwm_pcie_clear_cmd_in_flight(sc);
3611 }
3612 
3613 #if 0
3614 /*
3615  * necessary only for block ack mode
3616  */
3617 void
3618 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3619 	uint16_t len)
3620 {
3621 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3622 	uint16_t w_val;
3623 
3624 	scd_bc_tbl = sc->sched_dma.vaddr;
3625 
3626 	len += 8; /* magic numbers came naturally from paris */
3627 	len = roundup(len, 4) / 4;
3628 
3629 	w_val = htole16(sta_id << 12 | len);
3630 
3631 	/* Update TX scheduler. */
3632 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3633 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3634 	    BUS_DMASYNC_PREWRITE);
3635 
3636 	/* I really wonder what this is ?!? */
3637 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3638 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3639 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3640 		    BUS_DMASYNC_PREWRITE);
3641 	}
3642 }
3643 #endif
3644 
3645 static int
iwm_tx_rateidx_global_lookup(struct iwm_softc * sc,uint8_t rate)3646 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3647 {
3648 	int i;
3649 
3650 	for (i = 0; i < nitems(iwm_rates); i++) {
3651 		if (iwm_rates[i].rate == rate)
3652 			return (i);
3653 	}
3654 	/* XXX error? */
3655 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3656 	    "%s: couldn't find an entry for rate=%d\n",
3657 	    __func__,
3658 	    rate);
3659 	return (0);
3660 }
3661 
3662 /*
3663  * Fill in the rate related information for a transmit command.
3664  */
3665 static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc * sc,struct iwm_node * in,struct mbuf * m,struct iwm_tx_cmd * tx)3666 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3667 	struct mbuf *m, struct iwm_tx_cmd *tx)
3668 {
3669 	struct ieee80211_node *ni = &in->in_ni;
3670 	struct ieee80211_frame *wh;
3671 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3672 	const struct iwm_rate *rinfo;
3673 	int type;
3674 	int ridx, rate_flags;
3675 
3676 	wh = mtod(m, struct ieee80211_frame *);
3677 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3678 
3679 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3680 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3681 
3682 	if (type == IEEE80211_FC0_TYPE_MGT ||
3683 	    type == IEEE80211_FC0_TYPE_CTL ||
3684 	    (m->m_flags & M_EAPOL) != 0) {
3685 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3686 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3687 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3688 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3689 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3690 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3691 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3692 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3693 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3694 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3695 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3696 	} else {
3697 		/* for data frames, use RS table */
3698 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3699 		ridx = iwm_rate2ridx(sc,
3700 		    ieee80211_node_get_txrate_dot11rate(ni));
3701 		if (ridx == -1)
3702 			ridx = 0;
3703 
3704 		/* This is the index into the programmed table */
3705 		tx->initial_rate_index = 0;
3706 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3707 	}
3708 
3709 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3710 	    "%s: frame type=%d txrate %d\n",
3711 	        __func__, type, iwm_rates[ridx].rate);
3712 
3713 	rinfo = &iwm_rates[ridx];
3714 
3715 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3716 	    __func__, ridx,
3717 	    rinfo->rate,
3718 	    !! (IWM_RIDX_IS_CCK(ridx))
3719 	    );
3720 
3721 	/* XXX TODO: hard-coded TX antenna? */
3722 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3723 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3724 	else
3725 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3726 	if (IWM_RIDX_IS_CCK(ridx))
3727 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3728 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3729 
3730 	return rinfo;
3731 }
3732 
3733 #define TB0_SIZE 16
3734 static int
iwm_tx(struct iwm_softc * sc,struct mbuf * m,struct ieee80211_node * ni,int ac)3735 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3736 {
3737 	struct ieee80211com *ic = &sc->sc_ic;
3738 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3739 	struct iwm_node *in = IWM_NODE(ni);
3740 	struct iwm_tx_ring *ring;
3741 	struct iwm_tx_data *data;
3742 	struct iwm_tfd *desc;
3743 	struct iwm_device_cmd *cmd;
3744 	struct iwm_tx_cmd *tx;
3745 	struct ieee80211_frame *wh;
3746 	struct ieee80211_key *k = NULL;
3747 	struct mbuf *m1;
3748 	const struct iwm_rate *rinfo;
3749 	uint32_t flags;
3750 	u_int hdrlen;
3751 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3752 	int nsegs;
3753 	uint8_t tid, type;
3754 	int i, totlen, error, pad;
3755 
3756 	wh = mtod(m, struct ieee80211_frame *);
3757 	hdrlen = ieee80211_anyhdrsize(wh);
3758 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3759 	tid = 0;
3760 	ring = &sc->txq[ac];
3761 	desc = &ring->desc[ring->cur];
3762 	data = &ring->data[ring->cur];
3763 
3764 	/* Fill out iwm_tx_cmd to send to the firmware */
3765 	cmd = &ring->cmd[ring->cur];
3766 	cmd->hdr.code = IWM_TX_CMD;
3767 	cmd->hdr.flags = 0;
3768 	cmd->hdr.qid = ring->qid;
3769 	cmd->hdr.idx = ring->cur;
3770 
3771 	tx = (void *)cmd->data;
3772 	memset(tx, 0, sizeof(*tx));
3773 
3774 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3775 
3776 	/* Offloaded sequence number assignment; non-AMPDU case */
3777 	if ((m->m_flags & M_AMPDU_MPDU) == 0)
3778 		ieee80211_output_seqno_assign(ni, -1, m);
3779 
3780 	/* Encrypt the frame if need be. */
3781 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3782 		/* Retrieve key for TX && do software encryption. */
3783 		k = ieee80211_crypto_encap(ni, m);
3784 		if (k == NULL) {
3785 			m_freem(m);
3786 			return (ENOBUFS);
3787 		}
3788 		/* 802.11 header may have moved. */
3789 		wh = mtod(m, struct ieee80211_frame *);
3790 	}
3791 
3792 	if (ieee80211_radiotap_active_vap(vap)) {
3793 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3794 
3795 		tap->wt_flags = 0;
3796 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3797 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3798 		tap->wt_rate = rinfo->rate;
3799 		if (k != NULL)
3800 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3801 		ieee80211_radiotap_tx(vap, m);
3802 	}
3803 
3804 	flags = 0;
3805 	totlen = m->m_pkthdr.len;
3806 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3807 		flags |= IWM_TX_CMD_FLG_ACK;
3808 	}
3809 
3810 	if (type == IEEE80211_FC0_TYPE_DATA &&
3811 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3812 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3813 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3814 	}
3815 
3816 	tx->sta_id = IWM_STATION_ID;
3817 
3818 	if (type == IEEE80211_FC0_TYPE_MGT) {
3819 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3820 
3821 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3822 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3823 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3824 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3825 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3826 		} else {
3827 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3828 		}
3829 	} else {
3830 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3831 	}
3832 
3833 	if (hdrlen & 3) {
3834 		/* First segment length must be a multiple of 4. */
3835 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3836 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3837 		pad = 4 - (hdrlen & 3);
3838 	} else {
3839 		tx->offload_assist = 0;
3840 		pad = 0;
3841 	}
3842 
3843 	tx->len = htole16(totlen);
3844 	tx->tid_tspec = tid;
3845 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3846 
3847 	/* Set physical address of "scratch area". */
3848 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3849 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3850 
3851 	/* Copy 802.11 header in TX command. */
3852 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3853 
3854 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3855 
3856 	tx->sec_ctl = 0;
3857 	tx->tx_flags |= htole32(flags);
3858 
3859 	/* Trim 802.11 header. */
3860 	m_adj(m, hdrlen);
3861 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3862 	    segs, &nsegs, BUS_DMA_NOWAIT);
3863 	if (error != 0) {
3864 		if (error != EFBIG) {
3865 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3866 			    error);
3867 			m_freem(m);
3868 			return error;
3869 		}
3870 		/* Too many DMA segments, linearize mbuf. */
3871 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3872 		if (m1 == NULL) {
3873 			device_printf(sc->sc_dev,
3874 			    "%s: could not defrag mbuf\n", __func__);
3875 			m_freem(m);
3876 			return (ENOBUFS);
3877 		}
3878 		m = m1;
3879 
3880 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3881 		    segs, &nsegs, BUS_DMA_NOWAIT);
3882 		if (error != 0) {
3883 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3884 			    error);
3885 			m_freem(m);
3886 			return error;
3887 		}
3888 	}
3889 	data->m = m;
3890 	data->in = in;
3891 	data->done = 0;
3892 
3893 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3894 	    "sending txd %p, in %p\n", data, data->in);
3895 	KASSERT(data->in != NULL, ("node is NULL"));
3896 
3897 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3898 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3899 	    ring->qid, ring->cur, totlen, nsegs,
3900 	    le32toh(tx->tx_flags),
3901 	    le32toh(tx->rate_n_flags),
3902 	    tx->initial_rate_index
3903 	    );
3904 
3905 	/* Fill TX descriptor. */
3906 	memset(desc, 0, sizeof(*desc));
3907 	desc->num_tbs = 2 + nsegs;
3908 
3909 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3910 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3911 	    (TB0_SIZE << 4));
3912 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3913 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3914 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3915 	    hdrlen + pad - TB0_SIZE) << 4));
3916 
3917 	/* Other DMA segments are for data payload. */
3918 	for (i = 0; i < nsegs; i++) {
3919 		seg = &segs[i];
3920 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3921 		desc->tbs[i + 2].hi_n_len =
3922 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3923 		    (seg->ds_len << 4);
3924 	}
3925 
3926 	bus_dmamap_sync(ring->data_dmat, data->map,
3927 	    BUS_DMASYNC_PREWRITE);
3928 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3929 	    BUS_DMASYNC_PREWRITE);
3930 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3931 	    BUS_DMASYNC_PREWRITE);
3932 
3933 #if 0
3934 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3935 #endif
3936 
3937 	/* Kick TX ring. */
3938 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3939 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3940 
3941 	/* Mark TX ring as full if we reach a certain threshold. */
3942 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3943 		sc->qfullmsk |= 1 << ring->qid;
3944 	}
3945 
3946 	return 0;
3947 }
3948 
3949 static int
iwm_raw_xmit(struct ieee80211_node * ni,struct mbuf * m,const struct ieee80211_bpf_params * params)3950 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3951     const struct ieee80211_bpf_params *params)
3952 {
3953 	struct ieee80211com *ic = ni->ni_ic;
3954 	struct iwm_softc *sc = ic->ic_softc;
3955 	int error = 0;
3956 
3957 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3958 	    "->%s begin\n", __func__);
3959 
3960 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3961 		m_freem(m);
3962 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3963 		    "<-%s not RUNNING\n", __func__);
3964 		return (ENETDOWN);
3965         }
3966 
3967 	IWM_LOCK(sc);
3968 	/* XXX fix this */
3969         if (params == NULL) {
3970 		error = iwm_tx(sc, m, ni, 0);
3971 	} else {
3972 		error = iwm_tx(sc, m, ni, 0);
3973 	}
3974 	if (sc->sc_tx_timer == 0)
3975 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3976 	sc->sc_tx_timer = 5;
3977 	IWM_UNLOCK(sc);
3978 
3979         return (error);
3980 }
3981 
3982 /*
3983  * mvm/tx.c
3984  */
3985 
3986 /*
3987  * Note that there are transports that buffer frames before they reach
3988  * the firmware. This means that after flush_tx_path is called, the
3989  * queue might not be empty. The race-free way to handle this is to:
3990  * 1) set the station as draining
3991  * 2) flush the Tx path
3992  * 3) wait for the transport queues to be empty
3993  */
3994 int
iwm_flush_tx_path(struct iwm_softc * sc,uint32_t tfd_msk,uint32_t flags)3995 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3996 {
3997 	int ret;
3998 	struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
3999 		.queues_ctl = htole32(tfd_msk),
4000 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4001 	};
4002 
4003 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4004 	    sizeof(flush_cmd), &flush_cmd);
4005 	if (ret)
4006                 device_printf(sc->sc_dev,
4007 		    "Flushing tx queue failed: %d\n", ret);
4008 	return ret;
4009 }
4010 
4011 /*
4012  * BEGIN mvm/quota.c
4013  */
4014 
4015 static int
iwm_update_quotas(struct iwm_softc * sc,struct iwm_vap * ivp)4016 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4017 {
4018 	struct iwm_time_quota_cmd_v1 cmd;
4019 	int i, idx, ret, num_active_macs, quota, quota_rem;
4020 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4021 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4022 	uint16_t id;
4023 
4024 	memset(&cmd, 0, sizeof(cmd));
4025 
4026 	/* currently, PHY ID == binding ID */
4027 	if (ivp) {
4028 		id = ivp->phy_ctxt->id;
4029 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4030 		colors[id] = ivp->phy_ctxt->color;
4031 
4032 		if (1)
4033 			n_ifs[id] = 1;
4034 	}
4035 
4036 	/*
4037 	 * The FW's scheduling session consists of
4038 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4039 	 * equally between all the bindings that require quota
4040 	 */
4041 	num_active_macs = 0;
4042 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4043 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4044 		num_active_macs += n_ifs[i];
4045 	}
4046 
4047 	quota = 0;
4048 	quota_rem = 0;
4049 	if (num_active_macs) {
4050 		quota = IWM_MAX_QUOTA / num_active_macs;
4051 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4052 	}
4053 
4054 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4055 		if (colors[i] < 0)
4056 			continue;
4057 
4058 		cmd.quotas[idx].id_and_color =
4059 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4060 
4061 		if (n_ifs[i] <= 0) {
4062 			cmd.quotas[idx].quota = htole32(0);
4063 			cmd.quotas[idx].max_duration = htole32(0);
4064 		} else {
4065 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4066 			cmd.quotas[idx].max_duration = htole32(0);
4067 		}
4068 		idx++;
4069 	}
4070 
4071 	/* Give the remainder of the session to the first binding */
4072 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4073 
4074 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4075 	    sizeof(cmd), &cmd);
4076 	if (ret)
4077 		device_printf(sc->sc_dev,
4078 		    "%s: Failed to send quota: %d\n", __func__, ret);
4079 	return ret;
4080 }
4081 
4082 /*
4083  * END mvm/quota.c
4084  */
4085 
4086 /*
4087  * ieee80211 routines
4088  */
4089 
4090 /*
4091  * Change to AUTH state in 80211 state machine.  Roughly matches what
4092  * Linux does in bss_info_changed().
4093  */
4094 static int
iwm_auth(struct ieee80211vap * vap,struct iwm_softc * sc)4095 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4096 {
4097 	struct ieee80211_node *ni;
4098 	struct iwm_node *in;
4099 	struct iwm_vap *iv = IWM_VAP(vap);
4100 	uint32_t duration;
4101 	int error;
4102 
4103 	/*
4104 	 * XXX i have a feeling that the vap node is being
4105 	 * freed from underneath us. Grr.
4106 	 */
4107 	ni = ieee80211_ref_node(vap->iv_bss);
4108 	in = IWM_NODE(ni);
4109 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4110 	    "%s: called; vap=%p, bss ni=%p\n",
4111 	    __func__,
4112 	    vap,
4113 	    ni);
4114 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4115 	    __func__, ether_sprintf(ni->ni_bssid));
4116 
4117 	in->in_assoc = 0;
4118 	iv->iv_auth = 1;
4119 
4120 	/*
4121 	 * Firmware bug - it'll crash if the beacon interval is less
4122 	 * than 16. We can't avoid connecting at all, so refuse the
4123 	 * station state change, this will cause net80211 to abandon
4124 	 * attempts to connect to this AP, and eventually wpa_s will
4125 	 * blacklist the AP...
4126 	 */
4127 	if (ni->ni_intval < 16) {
4128 		device_printf(sc->sc_dev,
4129 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4130 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4131 		error = EINVAL;
4132 		goto out;
4133 	}
4134 
4135 	error = iwm_allow_mcast(vap, sc);
4136 	if (error) {
4137 		device_printf(sc->sc_dev,
4138 		    "%s: failed to set multicast\n", __func__);
4139 		goto out;
4140 	}
4141 
4142 	/*
4143 	 * This is where it deviates from what Linux does.
4144 	 *
4145 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4146 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4147 	 * and always does a mac_ctx_changed().
4148 	 *
4149 	 * The openbsd port doesn't attempt to do that - it reset things
4150 	 * at odd states and does the add here.
4151 	 *
4152 	 * So, until the state handling is fixed (ie, we never reset
4153 	 * the NIC except for a firmware failure, which should drag
4154 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4155 	 * contexts that are required), let's do a dirty hack here.
4156 	 */
4157 	if (iv->is_uploaded) {
4158 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4159 			device_printf(sc->sc_dev,
4160 			    "%s: failed to update MAC\n", __func__);
4161 			goto out;
4162 		}
4163 	} else {
4164 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4165 			device_printf(sc->sc_dev,
4166 			    "%s: failed to add MAC\n", __func__);
4167 			goto out;
4168 		}
4169 	}
4170 	sc->sc_firmware_state = 1;
4171 
4172 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4173 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4174 		device_printf(sc->sc_dev,
4175 		    "%s: failed update phy ctxt\n", __func__);
4176 		goto out;
4177 	}
4178 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4179 
4180 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4181 		device_printf(sc->sc_dev,
4182 		    "%s: binding update cmd\n", __func__);
4183 		goto out;
4184 	}
4185 	sc->sc_firmware_state = 2;
4186 	/*
4187 	 * Authentication becomes unreliable when powersaving is left enabled
4188 	 * here. Powersaving will be activated again when association has
4189 	 * finished or is aborted.
4190 	 */
4191 	iv->ps_disabled = TRUE;
4192 	error = iwm_power_update_mac(sc);
4193 	iv->ps_disabled = FALSE;
4194 	if (error != 0) {
4195 		device_printf(sc->sc_dev,
4196 		    "%s: failed to update power management\n",
4197 		    __func__);
4198 		goto out;
4199 	}
4200 	if ((error = iwm_add_sta(sc, in)) != 0) {
4201 		device_printf(sc->sc_dev,
4202 		    "%s: failed to add sta\n", __func__);
4203 		goto out;
4204 	}
4205 	sc->sc_firmware_state = 3;
4206 
4207 	/*
4208 	 * Prevent the FW from wandering off channel during association
4209 	 * by "protecting" the session with a time event.
4210 	 */
4211 	/* XXX duration is in units of TU, not MS */
4212 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4213 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4214 
4215 	error = 0;
4216 out:
4217 	if (error != 0)
4218 		iv->iv_auth = 0;
4219 	ieee80211_free_node(ni);
4220 	return (error);
4221 }
4222 
4223 static struct ieee80211_node *
iwm_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])4224 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4225 {
4226 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4227 	    M_NOWAIT | M_ZERO);
4228 }
4229 
4230 static uint8_t
iwm_rate_from_ucode_rate(uint32_t rate_n_flags)4231 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4232 {
4233 	uint8_t plcp = rate_n_flags & 0xff;
4234 	int i;
4235 
4236 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4237 		if (iwm_rates[i].plcp == plcp)
4238 			return iwm_rates[i].rate;
4239 	}
4240 	return 0;
4241 }
4242 
4243 uint8_t
iwm_ridx2rate(struct ieee80211_rateset * rs,int ridx)4244 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4245 {
4246 	int i;
4247 	uint8_t rval;
4248 
4249 	for (i = 0; i < rs->rs_nrates; i++) {
4250 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4251 		if (rval == iwm_rates[ridx].rate)
4252 			return rs->rs_rates[i];
4253 	}
4254 
4255 	return 0;
4256 }
4257 
4258 static int
iwm_rate2ridx(struct iwm_softc * sc,uint8_t rate)4259 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4260 {
4261 	int i;
4262 
4263 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4264 		if (iwm_rates[i].rate == rate)
4265 			return i;
4266 	}
4267 
4268 	device_printf(sc->sc_dev,
4269 	    "%s: WARNING: device rate for %u not found!\n",
4270 	    __func__, rate);
4271 
4272 	return -1;
4273 }
4274 
4275 
4276 static void
iwm_setrates(struct iwm_softc * sc,struct iwm_node * in,int dot11rate)4277 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int dot11rate)
4278 {
4279 	struct ieee80211_node *ni = &in->in_ni;
4280 	struct iwm_lq_cmd *lq = &in->in_lq;
4281 	struct ieee80211_rateset *rs = &ni->ni_rates;
4282 	int nrates = rs->rs_nrates;
4283 	int i, ridx, tab = 0;
4284 //	int txant = 0;
4285 	int rix;
4286 
4287 	/*
4288 	 * Look up the rate index for the given legacy rate from
4289 	 * the rs_rates table.  Default to the lowest rate if it's
4290 	 * not found (which is obviously hugely problematic.)
4291 	 */
4292 	rix = -1;
4293 	for (i = 0; i < nrates; i++) {
4294 		int rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
4295 		if (rate == dot11rate) {
4296 			rix = i;
4297 			break;
4298 		}
4299 	}
4300 	if (rix < 0) {
4301 		device_printf(sc->sc_dev,
4302 		    "%s: failed to lookup dot11rate (%d)\n",
4303 		    __func__, dot11rate);
4304 		rix = 0;
4305 	}
4306 
4307 	if (nrates > nitems(lq->rs_table)) {
4308 		device_printf(sc->sc_dev,
4309 		    "%s: node supports %d rates, driver handles "
4310 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4311 		return;
4312 	}
4313 	if (nrates == 0) {
4314 		device_printf(sc->sc_dev,
4315 		    "%s: node supports 0 rates, odd!\n", __func__);
4316 		return;
4317 	}
4318 	nrates = imin(rix + 1, nrates);
4319 
4320 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4321 	    "%s: nrates=%d\n", __func__, nrates);
4322 
4323 	/* then construct a lq_cmd based on those */
4324 	memset(lq, 0, sizeof(*lq));
4325 	lq->sta_id = IWM_STATION_ID;
4326 
4327 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4328 	if (ni->ni_flags & IEEE80211_NODE_HT)
4329 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4330 
4331 	/*
4332 	 * are these used? (we don't do SISO or MIMO)
4333 	 * need to set them to non-zero, though, or we get an error.
4334 	 */
4335 	lq->single_stream_ant_msk = 1;
4336 	lq->dual_stream_ant_msk = 1;
4337 
4338 	/*
4339 	 * Build the actual rate selection table.
4340 	 * The lowest bits are the rates.  Additionally,
4341 	 * CCK needs bit 9 to be set.  The rest of the bits
4342 	 * we add to the table select the tx antenna
4343 	 * Note that we add the rates in the highest rate first
4344 	 * (opposite of ni_rates).
4345 	 */
4346 	for (i = 0; i < nrates; i++) {
4347 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4348 		int nextant;
4349 
4350 		/* Map 802.11 rate to HW rate index. */
4351 		ridx = iwm_rate2ridx(sc, rate);
4352 		if (ridx == -1)
4353 			continue;
4354 
4355 #if 0
4356 		if (txant == 0)
4357 			txant = iwm_get_valid_tx_ant(sc);
4358 		nextant = 1<<(ffs(txant)-1);
4359 		txant &= ~nextant;
4360 #else
4361 		nextant = iwm_get_valid_tx_ant(sc);
4362 #endif
4363 		tab = iwm_rates[ridx].plcp;
4364 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4365 		if (IWM_RIDX_IS_CCK(ridx))
4366 			tab |= IWM_RATE_MCS_CCK_MSK;
4367 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4368 		    "station rate i=%d, rate=%d, hw=%x\n",
4369 		    i, iwm_rates[ridx].rate, tab);
4370 		lq->rs_table[i] = htole32(tab);
4371 	}
4372 	/* then fill the rest with the lowest possible rate */
4373 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4374 		KASSERT(tab != 0, ("invalid tab"));
4375 		lq->rs_table[i] = htole32(tab);
4376 	}
4377 }
4378 
4379 static void
iwm_bring_down_firmware(struct iwm_softc * sc,struct ieee80211vap * vap)4380 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4381 {
4382 	struct iwm_vap *ivp = IWM_VAP(vap);
4383 	int error;
4384 
4385 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4386 	sc->sc_tx_timer = 0;
4387 
4388 	ivp->iv_auth = 0;
4389 	if (sc->sc_firmware_state == 3) {
4390 		iwm_xmit_queue_drain(sc);
4391 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4392 		error = iwm_rm_sta(sc, vap, TRUE);
4393 		if (error) {
4394 			device_printf(sc->sc_dev,
4395 			    "%s: Failed to remove station: %d\n",
4396 			    __func__, error);
4397 		}
4398 	}
4399 	if (sc->sc_firmware_state == 3) {
4400 		error = iwm_mac_ctxt_changed(sc, vap);
4401 		if (error) {
4402 			device_printf(sc->sc_dev,
4403 			    "%s: Failed to change mac context: %d\n",
4404 			    __func__, error);
4405 		}
4406 	}
4407 	if (sc->sc_firmware_state == 3) {
4408 		error = iwm_sf_update(sc, vap, FALSE);
4409 		if (error) {
4410 			device_printf(sc->sc_dev,
4411 			    "%s: Failed to update smart FIFO: %d\n",
4412 			    __func__, error);
4413 		}
4414 	}
4415 	if (sc->sc_firmware_state == 3) {
4416 		error = iwm_rm_sta_id(sc, vap);
4417 		if (error) {
4418 			device_printf(sc->sc_dev,
4419 			    "%s: Failed to remove station id: %d\n",
4420 			    __func__, error);
4421 		}
4422 	}
4423 	if (sc->sc_firmware_state == 3) {
4424 		error = iwm_update_quotas(sc, NULL);
4425 		if (error) {
4426 			device_printf(sc->sc_dev,
4427 			    "%s: Failed to update PHY quota: %d\n",
4428 			    __func__, error);
4429 		}
4430 	}
4431 	if (sc->sc_firmware_state == 3) {
4432 		/* XXX Might need to specify bssid correctly. */
4433 		error = iwm_mac_ctxt_changed(sc, vap);
4434 		if (error) {
4435 			device_printf(sc->sc_dev,
4436 			    "%s: Failed to change mac context: %d\n",
4437 			    __func__, error);
4438 		}
4439 	}
4440 	if (sc->sc_firmware_state == 3) {
4441 		sc->sc_firmware_state = 2;
4442 	}
4443 	if (sc->sc_firmware_state > 1) {
4444 		error = iwm_binding_remove_vif(sc, ivp);
4445 		if (error) {
4446 			device_printf(sc->sc_dev,
4447 			    "%s: Failed to remove channel ctx: %d\n",
4448 			    __func__, error);
4449 		}
4450 	}
4451 	if (sc->sc_firmware_state > 1) {
4452 		sc->sc_firmware_state = 1;
4453 	}
4454 	ivp->phy_ctxt = NULL;
4455 	if (sc->sc_firmware_state > 0) {
4456 		error = iwm_mac_ctxt_changed(sc, vap);
4457 		if (error) {
4458 			device_printf(sc->sc_dev,
4459 			    "%s: Failed to change mac context: %d\n",
4460 			    __func__, error);
4461 		}
4462 	}
4463 	if (sc->sc_firmware_state > 0) {
4464 		error = iwm_power_update_mac(sc);
4465 		if (error != 0) {
4466 			device_printf(sc->sc_dev,
4467 			    "%s: failed to update power management\n",
4468 			    __func__);
4469 		}
4470 	}
4471 	sc->sc_firmware_state = 0;
4472 }
4473 
4474 static int
iwm_newstate(struct ieee80211vap * vap,enum ieee80211_state nstate,int arg)4475 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4476 {
4477 	struct iwm_vap *ivp = IWM_VAP(vap);
4478 	struct ieee80211com *ic = vap->iv_ic;
4479 	struct iwm_softc *sc = ic->ic_softc;
4480 	struct iwm_node *in;
4481 	int error;
4482 
4483 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4484 	    "switching state %s -> %s arg=0x%x\n",
4485 	    ieee80211_state_name[vap->iv_state],
4486 	    ieee80211_state_name[nstate],
4487 	    arg);
4488 
4489 	IEEE80211_UNLOCK(ic);
4490 	IWM_LOCK(sc);
4491 
4492 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4493 	    (nstate == IEEE80211_S_AUTH ||
4494 	     nstate == IEEE80211_S_ASSOC ||
4495 	     nstate == IEEE80211_S_RUN)) {
4496 		/* Stop blinking for a scan, when authenticating. */
4497 		iwm_led_blink_stop(sc);
4498 	}
4499 
4500 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4501 		iwm_led_disable(sc);
4502 		/* disable beacon filtering if we're hopping out of RUN */
4503 		iwm_disable_beacon_filter(sc);
4504 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4505 			in->in_assoc = 0;
4506 	}
4507 
4508 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4509 	     vap->iv_state == IEEE80211_S_ASSOC ||
4510 	     vap->iv_state == IEEE80211_S_RUN) &&
4511 	    (nstate == IEEE80211_S_INIT ||
4512 	     nstate == IEEE80211_S_SCAN ||
4513 	     nstate == IEEE80211_S_AUTH)) {
4514 		iwm_stop_session_protection(sc, ivp);
4515 	}
4516 
4517 	if ((vap->iv_state == IEEE80211_S_RUN ||
4518 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4519 	    nstate == IEEE80211_S_INIT) {
4520 		/*
4521 		 * In this case, iv_newstate() wants to send an 80211 frame on
4522 		 * the network that we are leaving. So we need to call it,
4523 		 * before tearing down all the firmware state.
4524 		 */
4525 		IWM_UNLOCK(sc);
4526 		IEEE80211_LOCK(ic);
4527 		ivp->iv_newstate(vap, nstate, arg);
4528 		IEEE80211_UNLOCK(ic);
4529 		IWM_LOCK(sc);
4530 		iwm_bring_down_firmware(sc, vap);
4531 		IWM_UNLOCK(sc);
4532 		IEEE80211_LOCK(ic);
4533 		return 0;
4534 	}
4535 
4536 	switch (nstate) {
4537 	case IEEE80211_S_INIT:
4538 	case IEEE80211_S_SCAN:
4539 		break;
4540 
4541 	case IEEE80211_S_AUTH:
4542 		iwm_bring_down_firmware(sc, vap);
4543 		if ((error = iwm_auth(vap, sc)) != 0) {
4544 			device_printf(sc->sc_dev,
4545 			    "%s: could not move to auth state: %d\n",
4546 			    __func__, error);
4547 			iwm_bring_down_firmware(sc, vap);
4548 			IWM_UNLOCK(sc);
4549 			IEEE80211_LOCK(ic);
4550 			return 1;
4551 		}
4552 		break;
4553 
4554 	case IEEE80211_S_ASSOC:
4555 		/*
4556 		 * EBS may be disabled due to previous failures reported by FW.
4557 		 * Reset EBS status here assuming environment has been changed.
4558 		 */
4559 		sc->last_ebs_successful = TRUE;
4560 		break;
4561 
4562 	case IEEE80211_S_RUN:
4563 		in = IWM_NODE(vap->iv_bss);
4564 		/* Update the association state, now we have it all */
4565 		/* (eg associd comes in at this point */
4566 		error = iwm_update_sta(sc, in);
4567 		if (error != 0) {
4568 			device_printf(sc->sc_dev,
4569 			    "%s: failed to update STA\n", __func__);
4570 			IWM_UNLOCK(sc);
4571 			IEEE80211_LOCK(ic);
4572 			return error;
4573 		}
4574 		in->in_assoc = 1;
4575 		error = iwm_mac_ctxt_changed(sc, vap);
4576 		if (error != 0) {
4577 			device_printf(sc->sc_dev,
4578 			    "%s: failed to update MAC: %d\n", __func__, error);
4579 		}
4580 
4581 		iwm_sf_update(sc, vap, FALSE);
4582 		iwm_enable_beacon_filter(sc, ivp);
4583 		iwm_power_update_mac(sc);
4584 		iwm_update_quotas(sc, ivp);
4585 		ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4586 		iwm_setrates(sc, in,
4587 		    ieee80211_node_get_txrate_dot11rate(&in->in_ni));
4588 
4589 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4590 			device_printf(sc->sc_dev,
4591 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4592 		}
4593 
4594 		iwm_led_enable(sc);
4595 		break;
4596 
4597 	default:
4598 		break;
4599 	}
4600 	IWM_UNLOCK(sc);
4601 	IEEE80211_LOCK(ic);
4602 
4603 	return (ivp->iv_newstate(vap, nstate, arg));
4604 }
4605 
4606 void
iwm_endscan_cb(void * arg,int pending)4607 iwm_endscan_cb(void *arg, int pending)
4608 {
4609 	struct iwm_softc *sc = arg;
4610 	struct ieee80211com *ic = &sc->sc_ic;
4611 
4612 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4613 	    "%s: scan ended\n",
4614 	    __func__);
4615 
4616 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4617 }
4618 
4619 static int
iwm_send_bt_init_conf(struct iwm_softc * sc)4620 iwm_send_bt_init_conf(struct iwm_softc *sc)
4621 {
4622 	struct iwm_bt_coex_cmd bt_cmd;
4623 
4624 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4625 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4626 
4627 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4628 	    &bt_cmd);
4629 }
4630 
4631 static boolean_t
iwm_is_lar_supported(struct iwm_softc * sc)4632 iwm_is_lar_supported(struct iwm_softc *sc)
4633 {
4634 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4635 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4636 
4637 	if (iwm_lar_disable)
4638 		return FALSE;
4639 
4640 	/*
4641 	 * Enable LAR only if it is supported by the FW (TLV) &&
4642 	 * enabled in the NVM
4643 	 */
4644 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4645 		return nvm_lar && tlv_lar;
4646 	else
4647 		return tlv_lar;
4648 }
4649 
4650 static boolean_t
iwm_is_wifi_mcc_supported(struct iwm_softc * sc)4651 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4652 {
4653 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4654 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4655 }
4656 
4657 static int
iwm_send_update_mcc_cmd(struct iwm_softc * sc,const char * alpha2)4658 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4659 {
4660 	struct iwm_mcc_update_cmd mcc_cmd;
4661 	struct iwm_host_cmd hcmd = {
4662 		.id = IWM_MCC_UPDATE_CMD,
4663 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4664 		.data = { &mcc_cmd },
4665 	};
4666 	int ret;
4667 #ifdef IWM_DEBUG
4668 	struct iwm_rx_packet *pkt;
4669 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4670 	struct iwm_mcc_update_resp_v2 *mcc_resp;
4671 	int n_channels;
4672 	uint16_t mcc;
4673 #endif
4674 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4675 
4676 	if (!iwm_is_lar_supported(sc)) {
4677 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4678 		    __func__);
4679 		return 0;
4680 	}
4681 
4682 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4683 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4684 	if (iwm_is_wifi_mcc_supported(sc))
4685 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4686 	else
4687 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4688 
4689 	if (resp_v2)
4690 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4691 	else
4692 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4693 
4694 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4695 	    "send MCC update to FW with '%c%c' src = %d\n",
4696 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4697 
4698 	ret = iwm_send_cmd(sc, &hcmd);
4699 	if (ret)
4700 		return ret;
4701 
4702 #ifdef IWM_DEBUG
4703 	pkt = hcmd.resp_pkt;
4704 
4705 	/* Extract MCC response */
4706 	if (resp_v2) {
4707 		mcc_resp = (void *)pkt->data;
4708 		mcc = mcc_resp->mcc;
4709 		n_channels =  le32toh(mcc_resp->n_channels);
4710 	} else {
4711 		mcc_resp_v1 = (void *)pkt->data;
4712 		mcc = mcc_resp_v1->mcc;
4713 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4714 	}
4715 
4716 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4717 	if (mcc == 0)
4718 		mcc = 0x3030;  /* "00" - world */
4719 
4720 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4721 	    "regulatory domain '%c%c' (%d channels available)\n",
4722 	    mcc >> 8, mcc & 0xff, n_channels);
4723 #endif
4724 	iwm_free_resp(sc, &hcmd);
4725 
4726 	return 0;
4727 }
4728 
4729 static void
iwm_tt_tx_backoff(struct iwm_softc * sc,uint32_t backoff)4730 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4731 {
4732 	struct iwm_host_cmd cmd = {
4733 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4734 		.len = { sizeof(uint32_t), },
4735 		.data = { &backoff, },
4736 	};
4737 
4738 	if (iwm_send_cmd(sc, &cmd) != 0) {
4739 		device_printf(sc->sc_dev,
4740 		    "failed to change thermal tx backoff\n");
4741 	}
4742 }
4743 
4744 static int
iwm_init_hw(struct iwm_softc * sc)4745 iwm_init_hw(struct iwm_softc *sc)
4746 {
4747 	struct ieee80211com *ic = &sc->sc_ic;
4748 	int error, i, ac;
4749 
4750 	sc->sf_state = IWM_SF_UNINIT;
4751 
4752 	if ((error = iwm_start_hw(sc)) != 0) {
4753 		printf("iwm_start_hw: failed %d\n", error);
4754 		return error;
4755 	}
4756 
4757 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4758 		printf("iwm_run_init_ucode: failed %d\n", error);
4759 		return error;
4760 	}
4761 
4762 	/*
4763 	 * should stop and start HW since that INIT
4764 	 * image just loaded
4765 	 */
4766 	iwm_stop_device(sc);
4767 	sc->sc_ps_disabled = FALSE;
4768 	if ((error = iwm_start_hw(sc)) != 0) {
4769 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4770 		return error;
4771 	}
4772 
4773 	/* omstart, this time with the regular firmware */
4774 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4775 	if (error) {
4776 		device_printf(sc->sc_dev, "could not load firmware\n");
4777 		goto error;
4778 	}
4779 
4780 	error = iwm_sf_update(sc, NULL, FALSE);
4781 	if (error)
4782 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4783 
4784 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4785 		device_printf(sc->sc_dev, "bt init conf failed\n");
4786 		goto error;
4787 	}
4788 
4789 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4790 	if (error != 0) {
4791 		device_printf(sc->sc_dev, "antenna config failed\n");
4792 		goto error;
4793 	}
4794 
4795 	/* Send phy db control command and then phy db calibration */
4796 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4797 		goto error;
4798 
4799 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4800 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4801 		goto error;
4802 	}
4803 
4804 	/* Add auxiliary station for scanning */
4805 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4806 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4807 		goto error;
4808 	}
4809 
4810 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4811 		/*
4812 		 * The channel used here isn't relevant as it's
4813 		 * going to be overwritten in the other flows.
4814 		 * For now use the first channel we have.
4815 		 */
4816 		if ((error = iwm_phy_ctxt_add(sc,
4817 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4818 			goto error;
4819 	}
4820 
4821 	/* Initialize tx backoffs to the minimum. */
4822 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4823 		iwm_tt_tx_backoff(sc, 0);
4824 
4825 	if (iwm_config_ltr(sc) != 0)
4826 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4827 
4828 	error = iwm_power_update_device(sc);
4829 	if (error)
4830 		goto error;
4831 
4832 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4833 		goto error;
4834 
4835 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4836 		if ((error = iwm_config_umac_scan(sc)) != 0)
4837 			goto error;
4838 	}
4839 
4840 	/* Enable Tx queues. */
4841 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4842 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4843 		    iwm_ac_to_tx_fifo[ac]);
4844 		if (error)
4845 			goto error;
4846 	}
4847 
4848 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4849 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4850 		goto error;
4851 	}
4852 
4853 	return 0;
4854 
4855  error:
4856 	iwm_stop_device(sc);
4857 	return error;
4858 }
4859 
4860 /* Allow multicast from our BSSID. */
4861 static int
iwm_allow_mcast(struct ieee80211vap * vap,struct iwm_softc * sc)4862 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4863 {
4864 	struct ieee80211_node *ni = vap->iv_bss;
4865 	struct iwm_mcast_filter_cmd *cmd;
4866 	size_t size;
4867 	int error;
4868 
4869 	size = roundup(sizeof(*cmd), 4);
4870 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4871 	if (cmd == NULL)
4872 		return ENOMEM;
4873 	cmd->filter_own = 1;
4874 	cmd->port_id = 0;
4875 	cmd->count = 0;
4876 	cmd->pass_all = 1;
4877 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4878 
4879 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4880 	    IWM_CMD_SYNC, size, cmd);
4881 	free(cmd, M_DEVBUF);
4882 
4883 	return (error);
4884 }
4885 
4886 /*
4887  * ifnet interfaces
4888  */
4889 
4890 static void
iwm_init(struct iwm_softc * sc)4891 iwm_init(struct iwm_softc *sc)
4892 {
4893 	int error;
4894 
4895 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4896 		return;
4897 	}
4898 	sc->sc_generation++;
4899 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4900 
4901 	if ((error = iwm_init_hw(sc)) != 0) {
4902 		printf("iwm_init_hw failed %d\n", error);
4903 		iwm_stop(sc);
4904 		return;
4905 	}
4906 
4907 	/*
4908 	 * Ok, firmware loaded and we are jogging
4909 	 */
4910 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4911 }
4912 
4913 static int
iwm_transmit(struct ieee80211com * ic,struct mbuf * m)4914 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4915 {
4916 	struct iwm_softc *sc;
4917 	int error;
4918 
4919 	sc = ic->ic_softc;
4920 
4921 	IWM_LOCK(sc);
4922 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4923 		IWM_UNLOCK(sc);
4924 		return (ENXIO);
4925 	}
4926 	error = mbufq_enqueue(&sc->sc_snd, m);
4927 	if (error) {
4928 		IWM_UNLOCK(sc);
4929 		return (error);
4930 	}
4931 	iwm_start(sc);
4932 	IWM_UNLOCK(sc);
4933 	return (0);
4934 }
4935 
4936 /*
4937  * Dequeue packets from sendq and call send.
4938  */
4939 static void
iwm_start(struct iwm_softc * sc)4940 iwm_start(struct iwm_softc *sc)
4941 {
4942 	struct ieee80211_node *ni;
4943 	struct mbuf *m;
4944 	int ac = 0;
4945 
4946 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4947 	while (sc->qfullmsk == 0 &&
4948 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4949 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4950 		if (iwm_tx(sc, m, ni, ac) != 0) {
4951 			if_inc_counter(ni->ni_vap->iv_ifp,
4952 			    IFCOUNTER_OERRORS, 1);
4953 			ieee80211_free_node(ni);
4954 			continue;
4955 		}
4956 		if (sc->sc_tx_timer == 0) {
4957 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4958 			    sc);
4959 		}
4960 		sc->sc_tx_timer = 15;
4961 	}
4962 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4963 }
4964 
4965 static void
iwm_stop(struct iwm_softc * sc)4966 iwm_stop(struct iwm_softc *sc)
4967 {
4968 
4969 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4970 	sc->sc_flags |= IWM_FLAG_STOPPED;
4971 	sc->sc_generation++;
4972 	iwm_led_blink_stop(sc);
4973 	sc->sc_tx_timer = 0;
4974 	iwm_stop_device(sc);
4975 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4976 }
4977 
4978 static void
iwm_watchdog(void * arg)4979 iwm_watchdog(void *arg)
4980 {
4981 	struct iwm_softc *sc = arg;
4982 	struct ieee80211com *ic = &sc->sc_ic;
4983 
4984 	if (sc->sc_attached == 0)
4985 		return;
4986 
4987 	if (sc->sc_tx_timer > 0) {
4988 		if (--sc->sc_tx_timer == 0) {
4989 			device_printf(sc->sc_dev, "device timeout\n");
4990 #ifdef IWM_DEBUG
4991 			iwm_nic_error(sc);
4992 #endif
4993 			ieee80211_restart_all(ic);
4994 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4995 			return;
4996 		}
4997 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4998 	}
4999 }
5000 
5001 static void
iwm_parent(struct ieee80211com * ic)5002 iwm_parent(struct ieee80211com *ic)
5003 {
5004 	struct iwm_softc *sc = ic->ic_softc;
5005 	int startall = 0;
5006 	int rfkill = 0;
5007 
5008 	IWM_LOCK(sc);
5009 	if (ic->ic_nrunning > 0) {
5010 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5011 			iwm_init(sc);
5012 			rfkill = iwm_check_rfkill(sc);
5013 			if (!rfkill)
5014 				startall = 1;
5015 		}
5016 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5017 		iwm_stop(sc);
5018 	IWM_UNLOCK(sc);
5019 	if (startall)
5020 		ieee80211_start_all(ic);
5021 	else if (rfkill)
5022 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5023 }
5024 
5025 static void
iwm_rftoggle_task(void * arg,int npending __unused)5026 iwm_rftoggle_task(void *arg, int npending __unused)
5027 {
5028 	struct iwm_softc *sc = arg;
5029 	struct ieee80211com *ic = &sc->sc_ic;
5030 	int rfkill;
5031 
5032 	IWM_LOCK(sc);
5033 	rfkill = iwm_check_rfkill(sc);
5034 	IWM_UNLOCK(sc);
5035 	if (rfkill) {
5036 		device_printf(sc->sc_dev,
5037 		    "%s: rfkill switch, disabling interface\n", __func__);
5038 		ieee80211_suspend_all(ic);
5039 		ieee80211_notify_radio(ic, 0);
5040 	} else {
5041 		device_printf(sc->sc_dev,
5042 		    "%s: rfkill cleared, re-enabling interface\n", __func__);
5043 		ieee80211_resume_all(ic);
5044 		ieee80211_notify_radio(ic, 1);
5045 	}
5046 }
5047 
5048 /*
5049  * The interrupt side of things
5050  */
5051 
5052 /*
5053  * error dumping routines are from iwlwifi/mvm/utils.c
5054  */
5055 
5056 /*
5057  * Note: This structure is read from the device with IO accesses,
5058  * and the reading already does the endian conversion. As it is
5059  * read with uint32_t-sized accesses, any members with a different size
5060  * need to be ordered correctly though!
5061  */
5062 struct iwm_error_event_table {
5063 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5064 	uint32_t error_id;		/* type of error */
5065 	uint32_t trm_hw_status0;	/* TRM HW status */
5066 	uint32_t trm_hw_status1;	/* TRM HW status */
5067 	uint32_t blink2;		/* branch link */
5068 	uint32_t ilink1;		/* interrupt link */
5069 	uint32_t ilink2;		/* interrupt link */
5070 	uint32_t data1;		/* error-specific data */
5071 	uint32_t data2;		/* error-specific data */
5072 	uint32_t data3;		/* error-specific data */
5073 	uint32_t bcon_time;		/* beacon timer */
5074 	uint32_t tsf_low;		/* network timestamp function timer */
5075 	uint32_t tsf_hi;		/* network timestamp function timer */
5076 	uint32_t gp1;		/* GP1 timer register */
5077 	uint32_t gp2;		/* GP2 timer register */
5078 	uint32_t fw_rev_type;	/* firmware revision type */
5079 	uint32_t major;		/* uCode version major */
5080 	uint32_t minor;		/* uCode version minor */
5081 	uint32_t hw_ver;		/* HW Silicon version */
5082 	uint32_t brd_ver;		/* HW board version */
5083 	uint32_t log_pc;		/* log program counter */
5084 	uint32_t frame_ptr;		/* frame pointer */
5085 	uint32_t stack_ptr;		/* stack pointer */
5086 	uint32_t hcmd;		/* last host command header */
5087 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5088 				 * rxtx_flag */
5089 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5090 				 * host_flag */
5091 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5092 				 * enc_flag */
5093 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5094 				 * time_flag */
5095 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5096 				 * wico interrupt */
5097 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5098 	uint32_t wait_event;		/* wait event() caller address */
5099 	uint32_t l2p_control;	/* L2pControlField */
5100 	uint32_t l2p_duration;	/* L2pDurationField */
5101 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5102 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5103 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5104 				 * (LMPM_PMG_SEL) */
5105 	uint32_t u_timestamp;	/* indicate when the date and time of the
5106 				 * compilation */
5107 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5108 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5109 
5110 /*
5111  * UMAC error struct - relevant starting from family 8000 chip.
5112  * Note: This structure is read from the device with IO accesses,
5113  * and the reading already does the endian conversion. As it is
5114  * read with u32-sized accesses, any members with a different size
5115  * need to be ordered correctly though!
5116  */
5117 struct iwm_umac_error_event_table {
5118 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5119 	uint32_t error_id;	/* type of error */
5120 	uint32_t blink1;	/* branch link */
5121 	uint32_t blink2;	/* branch link */
5122 	uint32_t ilink1;	/* interrupt link */
5123 	uint32_t ilink2;	/* interrupt link */
5124 	uint32_t data1;		/* error-specific data */
5125 	uint32_t data2;		/* error-specific data */
5126 	uint32_t data3;		/* error-specific data */
5127 	uint32_t umac_major;
5128 	uint32_t umac_minor;
5129 	uint32_t frame_pointer;	/* core register 27*/
5130 	uint32_t stack_pointer;	/* core register 28 */
5131 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5132 	uint32_t nic_isr_pref;	/* ISR status register */
5133 } __packed;
5134 
5135 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5136 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5137 
5138 #ifdef IWM_DEBUG
5139 struct {
5140 	const char *name;
5141 	uint8_t num;
5142 } advanced_lookup[] = {
5143 	{ "NMI_INTERRUPT_WDG", 0x34 },
5144 	{ "SYSASSERT", 0x35 },
5145 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5146 	{ "BAD_COMMAND", 0x38 },
5147 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5148 	{ "FATAL_ERROR", 0x3D },
5149 	{ "NMI_TRM_HW_ERR", 0x46 },
5150 	{ "NMI_INTERRUPT_TRM", 0x4C },
5151 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5152 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5153 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5154 	{ "NMI_INTERRUPT_HOST", 0x66 },
5155 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5156 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5157 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5158 	{ "ADVANCED_SYSASSERT", 0 },
5159 };
5160 
5161 static const char *
iwm_desc_lookup(uint32_t num)5162 iwm_desc_lookup(uint32_t num)
5163 {
5164 	int i;
5165 
5166 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5167 		if (advanced_lookup[i].num == num)
5168 			return advanced_lookup[i].name;
5169 
5170 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5171 	return advanced_lookup[i].name;
5172 }
5173 
5174 static void
iwm_nic_umac_error(struct iwm_softc * sc)5175 iwm_nic_umac_error(struct iwm_softc *sc)
5176 {
5177 	struct iwm_umac_error_event_table table;
5178 	uint32_t base;
5179 
5180 	base = sc->umac_error_event_table;
5181 
5182 	if (base < 0x800000) {
5183 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5184 		    base);
5185 		return;
5186 	}
5187 
5188 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5189 		device_printf(sc->sc_dev, "reading errlog failed\n");
5190 		return;
5191 	}
5192 
5193 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5194 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5195 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5196 		    sc->sc_flags, table.valid);
5197 	}
5198 
5199 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5200 		iwm_desc_lookup(table.error_id));
5201 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5202 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5203 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5204 	    table.ilink1);
5205 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5206 	    table.ilink2);
5207 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5208 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5209 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5210 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5211 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5212 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5213 	    table.frame_pointer);
5214 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5215 	    table.stack_pointer);
5216 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5217 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5218 	    table.nic_isr_pref);
5219 }
5220 
5221 /*
5222  * Support for dumping the error log seemed like a good idea ...
5223  * but it's mostly hex junk and the only sensible thing is the
5224  * hw/ucode revision (which we know anyway).  Since it's here,
5225  * I'll just leave it in, just in case e.g. the Intel guys want to
5226  * help us decipher some "ADVANCED_SYSASSERT" later.
5227  */
5228 static void
iwm_nic_error(struct iwm_softc * sc)5229 iwm_nic_error(struct iwm_softc *sc)
5230 {
5231 	struct iwm_error_event_table table;
5232 	uint32_t base;
5233 
5234 	device_printf(sc->sc_dev, "dumping device error log\n");
5235 	base = sc->error_event_table[0];
5236 	if (base < 0x800000) {
5237 		device_printf(sc->sc_dev,
5238 		    "Invalid error log pointer 0x%08x\n", base);
5239 		return;
5240 	}
5241 
5242 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5243 		device_printf(sc->sc_dev, "reading errlog failed\n");
5244 		return;
5245 	}
5246 
5247 	if (!table.valid) {
5248 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5249 		return;
5250 	}
5251 
5252 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5253 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5254 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5255 		    sc->sc_flags, table.valid);
5256 	}
5257 
5258 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5259 	    iwm_desc_lookup(table.error_id));
5260 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5261 	    table.trm_hw_status0);
5262 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5263 	    table.trm_hw_status1);
5264 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5265 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5266 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5267 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5268 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5269 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5270 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5271 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5272 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5273 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5274 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5275 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5276 	    table.fw_rev_type);
5277 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5278 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5279 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5280 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5281 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5282 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5283 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5284 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5285 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5286 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5287 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5288 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5289 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5290 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5291 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5292 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5293 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5294 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5295 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5296 
5297 	if (sc->umac_error_event_table)
5298 		iwm_nic_umac_error(sc);
5299 }
5300 #endif
5301 
5302 static void
iwm_handle_rxb(struct iwm_softc * sc,struct mbuf * m)5303 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5304 {
5305 	struct ieee80211com *ic = &sc->sc_ic;
5306 	struct iwm_cmd_response *cresp;
5307 	struct mbuf *m1;
5308 	uint32_t offset = 0;
5309 	uint32_t maxoff = IWM_RBUF_SIZE;
5310 	uint32_t nextoff;
5311 	boolean_t stolen = FALSE;
5312 
5313 #define HAVEROOM(a)	\
5314     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5315 
5316 	while (HAVEROOM(offset)) {
5317 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5318 		    offset);
5319 		int qid, idx, code, len;
5320 
5321 		qid = pkt->hdr.qid;
5322 		idx = pkt->hdr.idx;
5323 
5324 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5325 
5326 		/*
5327 		 * randomly get these from the firmware, no idea why.
5328 		 * they at least seem harmless, so just ignore them for now
5329 		 */
5330 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5331 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5332 			break;
5333 		}
5334 
5335 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5336 		    "rx packet qid=%d idx=%d type=%x\n",
5337 		    qid & ~0x80, pkt->hdr.idx, code);
5338 
5339 		len = iwm_rx_packet_len(pkt);
5340 		len += sizeof(uint32_t); /* account for status word */
5341 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5342 
5343 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5344 
5345 		switch (code) {
5346 		case IWM_REPLY_RX_PHY_CMD:
5347 			iwm_rx_rx_phy_cmd(sc, pkt);
5348 			break;
5349 
5350 		case IWM_REPLY_RX_MPDU_CMD: {
5351 			/*
5352 			 * If this is the last frame in the RX buffer, we
5353 			 * can directly feed the mbuf to the sharks here.
5354 			 */
5355 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5356 			    struct iwm_rx_packet *, nextoff);
5357 			if (!HAVEROOM(nextoff) ||
5358 			    (nextpkt->hdr.code == 0 &&
5359 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5360 			     nextpkt->hdr.idx == 0) ||
5361 			    (nextpkt->len_n_flags ==
5362 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5363 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5364 					stolen = FALSE;
5365 					/* Make sure we abort the loop */
5366 					nextoff = maxoff;
5367 				}
5368 				break;
5369 			}
5370 
5371 			/*
5372 			 * Use m_copym instead of m_split, because that
5373 			 * makes it easier to keep a valid rx buffer in
5374 			 * the ring, when iwm_rx_mpdu() fails.
5375 			 *
5376 			 * We need to start m_copym() at offset 0, to get the
5377 			 * M_PKTHDR flag preserved.
5378 			 */
5379 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5380 			if (m1) {
5381 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5382 					stolen = TRUE;
5383 				else
5384 					m_freem(m1);
5385 			}
5386 			break;
5387 		}
5388 
5389 		case IWM_TX_CMD:
5390 			iwm_rx_tx_cmd(sc, pkt);
5391 			break;
5392 
5393 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5394 			struct iwm_missed_beacons_notif *resp;
5395 			int missed;
5396 
5397 			/* XXX look at mac_id to determine interface ID */
5398 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5399 
5400 			resp = (void *)pkt->data;
5401 			missed = le32toh(resp->consec_missed_beacons);
5402 
5403 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5404 			    "%s: MISSED_BEACON: mac_id=%d, "
5405 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5406 			    "num_rx=%d\n",
5407 			    __func__,
5408 			    le32toh(resp->mac_id),
5409 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5410 			    le32toh(resp->consec_missed_beacons),
5411 			    le32toh(resp->num_expected_beacons),
5412 			    le32toh(resp->num_recvd_beacons));
5413 
5414 			/* Be paranoid */
5415 			if (vap == NULL)
5416 				break;
5417 
5418 			/* XXX no net80211 locking? */
5419 			if (vap->iv_state == IEEE80211_S_RUN &&
5420 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5421 				if (missed > vap->iv_bmissthreshold) {
5422 					/* XXX bad locking; turn into task */
5423 					IWM_UNLOCK(sc);
5424 					ieee80211_beacon_miss(ic);
5425 					IWM_LOCK(sc);
5426 				}
5427 			}
5428 
5429 			break;
5430 		}
5431 
5432 		case IWM_MFUART_LOAD_NOTIFICATION:
5433 			break;
5434 
5435 		case IWM_ALIVE:
5436 			break;
5437 
5438 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5439 			break;
5440 
5441 		case IWM_STATISTICS_NOTIFICATION:
5442 			iwm_handle_rx_statistics(sc, pkt);
5443 			break;
5444 
5445 		case IWM_NVM_ACCESS_CMD:
5446 		case IWM_MCC_UPDATE_CMD:
5447 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5448 				memcpy(sc->sc_cmd_resp,
5449 				    pkt, sizeof(sc->sc_cmd_resp));
5450 			}
5451 			break;
5452 
5453 		case IWM_MCC_CHUB_UPDATE_CMD: {
5454 			struct iwm_mcc_chub_notif *notif;
5455 			notif = (void *)pkt->data;
5456 
5457 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5458 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5459 			sc->sc_fw_mcc[2] = '\0';
5460 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5461 			    "fw source %d sent CC '%s'\n",
5462 			    notif->source_id, sc->sc_fw_mcc);
5463 			break;
5464 		}
5465 
5466 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5467 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5468 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5469 			struct iwm_dts_measurement_notif_v1 *notif;
5470 
5471 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5472 				device_printf(sc->sc_dev,
5473 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5474 				break;
5475 			}
5476 			notif = (void *)pkt->data;
5477 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5478 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5479 			    notif->temp);
5480 			break;
5481 		}
5482 
5483 		case IWM_PHY_CONFIGURATION_CMD:
5484 		case IWM_TX_ANT_CONFIGURATION_CMD:
5485 		case IWM_ADD_STA:
5486 		case IWM_MAC_CONTEXT_CMD:
5487 		case IWM_REPLY_SF_CFG_CMD:
5488 		case IWM_POWER_TABLE_CMD:
5489 		case IWM_LTR_CONFIG:
5490 		case IWM_PHY_CONTEXT_CMD:
5491 		case IWM_BINDING_CONTEXT_CMD:
5492 		case IWM_TIME_EVENT_CMD:
5493 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5494 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5495 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5496 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5497 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5498 		case IWM_REPLY_BEACON_FILTERING_CMD:
5499 		case IWM_MAC_PM_POWER_TABLE:
5500 		case IWM_TIME_QUOTA_CMD:
5501 		case IWM_REMOVE_STA:
5502 		case IWM_TXPATH_FLUSH:
5503 		case IWM_LQ_CMD:
5504 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5505 				 IWM_FW_PAGING_BLOCK_CMD):
5506 		case IWM_BT_CONFIG:
5507 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5508 			cresp = (void *)pkt->data;
5509 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5510 				memcpy(sc->sc_cmd_resp,
5511 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5512 			}
5513 			break;
5514 
5515 		/* ignore */
5516 		case IWM_PHY_DB_CMD:
5517 			break;
5518 
5519 		case IWM_INIT_COMPLETE_NOTIF:
5520 			break;
5521 
5522 		case IWM_SCAN_OFFLOAD_COMPLETE:
5523 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5524 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5525 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5526 				ieee80211_runtask(ic, &sc->sc_es_task);
5527 			}
5528 			break;
5529 
5530 		case IWM_SCAN_ITERATION_COMPLETE: {
5531 			break;
5532 		}
5533 
5534 		case IWM_SCAN_COMPLETE_UMAC:
5535 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5536 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5537 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5538 				ieee80211_runtask(ic, &sc->sc_es_task);
5539 			}
5540 			break;
5541 
5542 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5543 #ifdef IWM_DEBUG
5544 			struct iwm_umac_scan_iter_complete_notif *notif;
5545 			notif = (void *)pkt->data;
5546 
5547 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5548 			    "complete, status=0x%x, %d channels scanned\n",
5549 			    notif->status, notif->scanned_channels);
5550 #endif
5551 			break;
5552 		}
5553 
5554 		case IWM_REPLY_ERROR: {
5555 			struct iwm_error_resp *resp;
5556 			resp = (void *)pkt->data;
5557 
5558 			device_printf(sc->sc_dev,
5559 			    "firmware error 0x%x, cmd 0x%x\n",
5560 			    le32toh(resp->error_type),
5561 			    resp->cmd_id);
5562 			break;
5563 		}
5564 
5565 		case IWM_TIME_EVENT_NOTIFICATION:
5566 			iwm_rx_time_event_notif(sc, pkt);
5567 			break;
5568 
5569 		/*
5570 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5571 		 * messages. Just ignore them for now.
5572 		 */
5573 		case IWM_DEBUG_LOG_MSG:
5574 			break;
5575 
5576 		case IWM_MCAST_FILTER_CMD:
5577 			break;
5578 
5579 		case IWM_SCD_QUEUE_CFG: {
5580 #ifdef IWM_DEBUG
5581 			struct iwm_scd_txq_cfg_rsp *rsp;
5582 			rsp = (void *)pkt->data;
5583 
5584 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5585 			    "queue cfg token=0x%x sta_id=%d "
5586 			    "tid=%d scd_queue=%d\n",
5587 			    rsp->token, rsp->sta_id, rsp->tid,
5588 			    rsp->scd_queue);
5589 #endif
5590 			break;
5591 		}
5592 
5593 		default:
5594 			device_printf(sc->sc_dev,
5595 			    "code %x, frame %d/%d %x unhandled\n",
5596 			    code, qid & ~0x80, idx, pkt->len_n_flags);
5597 			break;
5598 		}
5599 
5600 		/*
5601 		 * Why test bit 0x80?  The Linux driver:
5602 		 *
5603 		 * There is one exception:  uCode sets bit 15 when it
5604 		 * originates the response/notification, i.e. when the
5605 		 * response/notification is not a direct response to a
5606 		 * command sent by the driver.  For example, uCode issues
5607 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5608 		 * it is not a direct response to any driver command.
5609 		 *
5610 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5611 		 * uses a slightly different format for pkt->hdr, and "qid"
5612 		 * is actually the upper byte of a two-byte field.
5613 		 */
5614 		if (!(qid & (1 << 7)))
5615 			iwm_cmd_done(sc, pkt);
5616 
5617 		offset = nextoff;
5618 	}
5619 	if (stolen)
5620 		m_freem(m);
5621 #undef HAVEROOM
5622 }
5623 
5624 /*
5625  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5626  * Basic structure from if_iwn
5627  */
5628 static void
iwm_notif_intr(struct iwm_softc * sc)5629 iwm_notif_intr(struct iwm_softc *sc)
5630 {
5631 	int count;
5632 	uint32_t wreg;
5633 	uint16_t hw;
5634 
5635 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5636 	    BUS_DMASYNC_POSTREAD);
5637 
5638 	if (sc->cfg->mqrx_supported) {
5639 		count = IWM_RX_MQ_RING_COUNT;
5640 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5641 	} else {
5642 		count = IWM_RX_LEGACY_RING_COUNT;
5643 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5644 	}
5645 
5646 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5647 
5648 	/*
5649 	 * Process responses
5650 	 */
5651 	while (sc->rxq.cur != hw) {
5652 		struct iwm_rx_ring *ring = &sc->rxq;
5653 		struct iwm_rx_data *data = &ring->data[ring->cur];
5654 
5655 		bus_dmamap_sync(ring->data_dmat, data->map,
5656 		    BUS_DMASYNC_POSTREAD);
5657 
5658 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5659 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5660 		iwm_handle_rxb(sc, data->m);
5661 
5662 		ring->cur = (ring->cur + 1) % count;
5663 	}
5664 
5665 	/*
5666 	 * Tell the firmware that it can reuse the ring entries that
5667 	 * we have just processed.
5668 	 * Seems like the hardware gets upset unless we align
5669 	 * the write by 8??
5670 	 */
5671 	hw = (hw == 0) ? count - 1 : hw - 1;
5672 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5673 }
5674 
5675 static void
iwm_intr(void * arg)5676 iwm_intr(void *arg)
5677 {
5678 	struct iwm_softc *sc = arg;
5679 	int handled = 0;
5680 	int r1, r2;
5681 	int isperiodic = 0;
5682 
5683 	IWM_LOCK(sc);
5684 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5685 
5686 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5687 		uint32_t *ict = sc->ict_dma.vaddr;
5688 		int tmp;
5689 
5690 		tmp = htole32(ict[sc->ict_cur]);
5691 		if (!tmp)
5692 			goto out_ena;
5693 
5694 		/*
5695 		 * ok, there was something.  keep plowing until we have all.
5696 		 */
5697 		r1 = r2 = 0;
5698 		while (tmp) {
5699 			r1 |= tmp;
5700 			ict[sc->ict_cur] = 0;
5701 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5702 			tmp = htole32(ict[sc->ict_cur]);
5703 		}
5704 
5705 		/* this is where the fun begins.  don't ask */
5706 		if (r1 == 0xffffffff)
5707 			r1 = 0;
5708 
5709 		/* i am not expected to understand this */
5710 		if (r1 & 0xc0000)
5711 			r1 |= 0x8000;
5712 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5713 	} else {
5714 		r1 = IWM_READ(sc, IWM_CSR_INT);
5715 		/* "hardware gone" (where, fishing?) */
5716 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5717 			goto out;
5718 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5719 	}
5720 	if (r1 == 0 && r2 == 0) {
5721 		goto out_ena;
5722 	}
5723 
5724 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5725 
5726 	/* Safely ignore these bits for debug checks below */
5727 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5728 
5729 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5730 		int i;
5731 		struct ieee80211com *ic = &sc->sc_ic;
5732 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5733 
5734 #ifdef IWM_DEBUG
5735 		iwm_nic_error(sc);
5736 #endif
5737 		/* Dump driver status (TX and RX rings) while we're here. */
5738 		device_printf(sc->sc_dev, "driver status:\n");
5739 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5740 			struct iwm_tx_ring *ring = &sc->txq[i];
5741 			device_printf(sc->sc_dev,
5742 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5743 			    "queued=%-3d\n",
5744 			    i, ring->qid, ring->cur, ring->queued);
5745 		}
5746 		device_printf(sc->sc_dev,
5747 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5748 		device_printf(sc->sc_dev,
5749 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5750 
5751 		/* Reset our firmware state tracking. */
5752 		sc->sc_firmware_state = 0;
5753 		/* Don't stop the device; just do a VAP restart */
5754 		IWM_UNLOCK(sc);
5755 
5756 		if (vap == NULL) {
5757 			printf("%s: null vap\n", __func__);
5758 			return;
5759 		}
5760 
5761 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5762 		    "restarting\n", __func__, vap->iv_state);
5763 
5764 		ieee80211_restart_all(ic);
5765 		return;
5766 	}
5767 
5768 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5769 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5770 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5771 		iwm_stop(sc);
5772 		goto out;
5773 	}
5774 
5775 	/* firmware chunk loaded */
5776 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5777 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5778 		handled |= IWM_CSR_INT_BIT_FH_TX;
5779 		sc->sc_fw_chunk_done = 1;
5780 		wakeup(&sc->sc_fw);
5781 	}
5782 
5783 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5784 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5785 		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5786 	}
5787 
5788 	/*
5789 	 * The Linux driver uses periodic interrupts to avoid races.
5790 	 * We cargo-cult like it's going out of fashion.
5791 	 */
5792 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5793 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5794 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5795 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5796 			IWM_WRITE_1(sc,
5797 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5798 		isperiodic = 1;
5799 	}
5800 
5801 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5802 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5803 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5804 
5805 		iwm_notif_intr(sc);
5806 
5807 		/* enable periodic interrupt, see above */
5808 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5809 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5810 			    IWM_CSR_INT_PERIODIC_ENA);
5811 	}
5812 
5813 	if (__predict_false(r1 & ~handled))
5814 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5815 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5816  out_ena:
5817 	iwm_restore_interrupts(sc);
5818  out:
5819 	IWM_UNLOCK(sc);
5820 	return;
5821 }
5822 
5823 /*
5824  * Autoconf glue-sniffing
5825  */
5826 #define	PCI_VENDOR_INTEL		0x8086
5827 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5828 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5829 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5830 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5831 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5832 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5833 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5834 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5835 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5836 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5837 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5838 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5839 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5840 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5841 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
5842 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5843 
5844 static const struct iwm_devices {
5845 	uint16_t		device;
5846 	const struct iwm_cfg	*cfg;
5847 } iwm_devices[] = {
5848 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5849 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5850 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5851 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5852 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5853 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5854 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5855 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5856 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5857 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5858 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5859 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5860 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5861 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5862 	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5863 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5864 };
5865 
5866 static int
iwm_probe(device_t dev)5867 iwm_probe(device_t dev)
5868 {
5869 	int i;
5870 
5871 	for (i = 0; i < nitems(iwm_devices); i++) {
5872 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5873 		    pci_get_device(dev) == iwm_devices[i].device) {
5874 			device_set_desc(dev, iwm_devices[i].cfg->name);
5875 			return (BUS_PROBE_DEFAULT);
5876 		}
5877 	}
5878 
5879 	return (ENXIO);
5880 }
5881 
5882 static int
iwm_dev_check(device_t dev)5883 iwm_dev_check(device_t dev)
5884 {
5885 	struct iwm_softc *sc;
5886 	uint16_t devid;
5887 	int i;
5888 
5889 	sc = device_get_softc(dev);
5890 
5891 	devid = pci_get_device(dev);
5892 	for (i = 0; i < nitems(iwm_devices); i++) {
5893 		if (iwm_devices[i].device == devid) {
5894 			sc->cfg = iwm_devices[i].cfg;
5895 			return (0);
5896 		}
5897 	}
5898 	device_printf(dev, "unknown adapter type\n");
5899 	return ENXIO;
5900 }
5901 
5902 /* PCI registers */
5903 #define PCI_CFG_RETRY_TIMEOUT	0x041
5904 
5905 static int
iwm_pci_attach(device_t dev)5906 iwm_pci_attach(device_t dev)
5907 {
5908 	struct iwm_softc *sc;
5909 	int count, error, rid;
5910 	uint16_t reg;
5911 
5912 	sc = device_get_softc(dev);
5913 
5914 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5915 	 * PCI Tx retries from interfering with C3 CPU state */
5916 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5917 
5918 	/* Enable bus-mastering and hardware bug workaround. */
5919 	pci_enable_busmaster(dev);
5920 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5921 	/* if !MSI */
5922 	if (reg & PCIM_STATUS_INTxSTATE) {
5923 		reg &= ~PCIM_STATUS_INTxSTATE;
5924 	}
5925 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5926 
5927 	rid = PCIR_BAR(0);
5928 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5929 	    RF_ACTIVE);
5930 	if (sc->sc_mem == NULL) {
5931 		device_printf(sc->sc_dev, "can't map mem space\n");
5932 		return (ENXIO);
5933 	}
5934 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5935 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5936 
5937 	/* Install interrupt handler. */
5938 	count = 1;
5939 	rid = 0;
5940 	if (pci_alloc_msi(dev, &count) == 0)
5941 		rid = 1;
5942 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5943 	    (rid != 0 ? 0 : RF_SHAREABLE));
5944 	if (sc->sc_irq == NULL) {
5945 		device_printf(dev, "can't map interrupt\n");
5946 			return (ENXIO);
5947 	}
5948 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5949 	    NULL, iwm_intr, sc, &sc->sc_ih);
5950 	if (error != 0) {
5951 		device_printf(dev, "can't establish interrupt");
5952 		return (error);
5953 	}
5954 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5955 
5956 	return (0);
5957 }
5958 
5959 static void
iwm_pci_detach(device_t dev)5960 iwm_pci_detach(device_t dev)
5961 {
5962 	struct iwm_softc *sc = device_get_softc(dev);
5963 
5964 	if (sc->sc_irq != NULL) {
5965 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5966 		bus_release_resource(dev, SYS_RES_IRQ,
5967 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5968 		pci_release_msi(dev);
5969         }
5970 	if (sc->sc_mem != NULL)
5971 		bus_release_resource(dev, SYS_RES_MEMORY,
5972 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5973 }
5974 
5975 static int
iwm_attach(device_t dev)5976 iwm_attach(device_t dev)
5977 {
5978 	struct iwm_softc *sc = device_get_softc(dev);
5979 	struct ieee80211com *ic = &sc->sc_ic;
5980 	int error;
5981 	int txq_i, i;
5982 
5983 	sc->sc_dev = dev;
5984 	sc->sc_attached = 1;
5985 	IWM_LOCK_INIT(sc);
5986 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5987 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5988 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5989 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5990 	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
5991 
5992 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5993 	    taskqueue_thread_enqueue, &sc->sc_tq);
5994 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5995 	if (error != 0) {
5996 		device_printf(dev, "can't start taskq thread, error %d\n",
5997 		    error);
5998 		goto fail;
5999 	}
6000 
6001 	error = iwm_dev_check(dev);
6002 	if (error != 0)
6003 		goto fail;
6004 
6005 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6006 	if (sc->sc_notif_wait == NULL) {
6007 		device_printf(dev, "failed to init notification wait struct\n");
6008 		goto fail;
6009 	}
6010 
6011 	sc->sf_state = IWM_SF_UNINIT;
6012 
6013 	/* Init phy db */
6014 	sc->sc_phy_db = iwm_phy_db_init(sc);
6015 	if (!sc->sc_phy_db) {
6016 		device_printf(dev, "Cannot init phy_db\n");
6017 		goto fail;
6018 	}
6019 
6020 	/* Set EBS as successful as long as not stated otherwise by the FW. */
6021 	sc->last_ebs_successful = TRUE;
6022 
6023 	/* PCI attach */
6024 	error = iwm_pci_attach(dev);
6025 	if (error != 0)
6026 		goto fail;
6027 
6028 	sc->sc_wantresp = -1;
6029 
6030 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6031 	/*
6032 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6033 	 * changed, and now the revision step also includes bit 0-1 (no more
6034 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6035 	 * in the old format.
6036 	 */
6037 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6038 		int ret;
6039 		uint32_t hw_step;
6040 
6041 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6042 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6043 
6044 		if (iwm_prepare_card_hw(sc) != 0) {
6045 			device_printf(dev, "could not initialize hardware\n");
6046 			goto fail;
6047 		}
6048 
6049 		/*
6050 		 * In order to recognize C step the driver should read the
6051 		 * chip version id located at the AUX bus MISC address.
6052 		 */
6053 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6054 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6055 		DELAY(2);
6056 
6057 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6058 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6059 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6060 				   25000);
6061 		if (!ret) {
6062 			device_printf(sc->sc_dev,
6063 			    "Failed to wake up the nic\n");
6064 			goto fail;
6065 		}
6066 
6067 		if (iwm_nic_lock(sc)) {
6068 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6069 			hw_step |= IWM_ENABLE_WFPM;
6070 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6071 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6072 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6073 			if (hw_step == 0x3)
6074 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6075 						(IWM_SILICON_C_STEP << 2);
6076 			iwm_nic_unlock(sc);
6077 		} else {
6078 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6079 			goto fail;
6080 		}
6081 	}
6082 
6083 	/* special-case 7265D, it has the same PCI IDs. */
6084 	if (sc->cfg == &iwm7265_cfg &&
6085 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6086 		sc->cfg = &iwm7265d_cfg;
6087 	}
6088 
6089 	/* Allocate DMA memory for firmware transfers. */
6090 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6091 		device_printf(dev, "could not allocate memory for firmware\n");
6092 		goto fail;
6093 	}
6094 
6095 	/* Allocate "Keep Warm" page. */
6096 	if ((error = iwm_alloc_kw(sc)) != 0) {
6097 		device_printf(dev, "could not allocate keep warm page\n");
6098 		goto fail;
6099 	}
6100 
6101 	/* We use ICT interrupts */
6102 	if ((error = iwm_alloc_ict(sc)) != 0) {
6103 		device_printf(dev, "could not allocate ICT table\n");
6104 		goto fail;
6105 	}
6106 
6107 	/* Allocate TX scheduler "rings". */
6108 	if ((error = iwm_alloc_sched(sc)) != 0) {
6109 		device_printf(dev, "could not allocate TX scheduler rings\n");
6110 		goto fail;
6111 	}
6112 
6113 	/* Allocate TX rings */
6114 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6115 		if ((error = iwm_alloc_tx_ring(sc,
6116 		    &sc->txq[txq_i], txq_i)) != 0) {
6117 			device_printf(dev,
6118 			    "could not allocate TX ring %d\n",
6119 			    txq_i);
6120 			goto fail;
6121 		}
6122 	}
6123 
6124 	/* Allocate RX ring. */
6125 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6126 		device_printf(dev, "could not allocate RX ring\n");
6127 		goto fail;
6128 	}
6129 
6130 	/* Clear pending interrupts. */
6131 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6132 
6133 	ic->ic_softc = sc;
6134 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6135 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6136 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6137 
6138 	/* Set device capabilities. */
6139 	ic->ic_caps =
6140 	    IEEE80211_C_STA |
6141 	    IEEE80211_C_WPA |		/* WPA/RSN */
6142 	    IEEE80211_C_WME |
6143 	    IEEE80211_C_PMGT |
6144 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6145 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6146 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6147 	    ;
6148 	/* Advertise full-offload scanning */
6149 	ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
6150 	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
6151 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6152 		sc->sc_phyctxt[i].id = i;
6153 		sc->sc_phyctxt[i].color = 0;
6154 		sc->sc_phyctxt[i].ref = 0;
6155 		sc->sc_phyctxt[i].channel = NULL;
6156 	}
6157 
6158 	/* Default noise floor */
6159 	sc->sc_noise = -96;
6160 
6161 	/* Max RSSI */
6162 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6163 
6164 #ifdef IWM_DEBUG
6165 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6166 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6167 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6168 #endif
6169 
6170 	error = iwm_read_firmware(sc);
6171 	if (error) {
6172 		goto fail;
6173 	} else if (sc->sc_fw.fw_fp == NULL) {
6174 		/*
6175 		 * XXX Add a solution for properly deferring firmware load
6176 		 *     during bootup.
6177 		 */
6178 		goto fail;
6179 	} else {
6180 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6181 		sc->sc_preinit_hook.ich_arg = sc;
6182 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6183 			device_printf(dev,
6184 			    "config_intrhook_establish failed\n");
6185 			goto fail;
6186 		}
6187 	}
6188 
6189 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6190 	    "<-%s\n", __func__);
6191 
6192 	return 0;
6193 
6194 	/* Free allocated memory if something failed during attachment. */
6195 fail:
6196 	iwm_detach_local(sc, 0);
6197 
6198 	return ENXIO;
6199 }
6200 
6201 static int
iwm_is_valid_ether_addr(uint8_t * addr)6202 iwm_is_valid_ether_addr(uint8_t *addr)
6203 {
6204 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6205 
6206 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6207 		return (FALSE);
6208 
6209 	return (TRUE);
6210 }
6211 
6212 static int
iwm_wme_update(struct ieee80211com * ic)6213 iwm_wme_update(struct ieee80211com *ic)
6214 {
6215 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6216 	struct iwm_softc *sc = ic->ic_softc;
6217 	struct chanAccParams chp;
6218 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6219 	struct iwm_vap *ivp = IWM_VAP(vap);
6220 	struct iwm_node *in;
6221 	struct wmeParams tmp[WME_NUM_AC];
6222 	int aci, error;
6223 
6224 	if (vap == NULL)
6225 		return (0);
6226 
6227 	ieee80211_wme_ic_getparams(ic, &chp);
6228 
6229 	IEEE80211_LOCK(ic);
6230 	for (aci = 0; aci < WME_NUM_AC; aci++)
6231 		tmp[aci] = chp.cap_wmeParams[aci];
6232 	IEEE80211_UNLOCK(ic);
6233 
6234 	IWM_LOCK(sc);
6235 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6236 		const struct wmeParams *ac = &tmp[aci];
6237 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6238 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6239 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6240 		ivp->queue_params[aci].edca_txop =
6241 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6242 	}
6243 	ivp->have_wme = TRUE;
6244 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6245 		in = IWM_NODE(vap->iv_bss);
6246 		if (in->in_assoc) {
6247 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6248 				device_printf(sc->sc_dev,
6249 				    "%s: failed to update MAC\n", __func__);
6250 			}
6251 		}
6252 	}
6253 	IWM_UNLOCK(sc);
6254 
6255 	return (0);
6256 #undef IWM_EXP2
6257 }
6258 
6259 static void
iwm_preinit(void * arg)6260 iwm_preinit(void *arg)
6261 {
6262 	struct iwm_softc *sc = arg;
6263 	device_t dev = sc->sc_dev;
6264 	struct ieee80211com *ic = &sc->sc_ic;
6265 	int error;
6266 
6267 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6268 	    "->%s\n", __func__);
6269 
6270 	IWM_LOCK(sc);
6271 	if ((error = iwm_start_hw(sc)) != 0) {
6272 		device_printf(dev, "could not initialize hardware\n");
6273 		IWM_UNLOCK(sc);
6274 		goto fail;
6275 	}
6276 
6277 	error = iwm_run_init_ucode(sc, 1);
6278 	iwm_stop_device(sc);
6279 	if (error) {
6280 		IWM_UNLOCK(sc);
6281 		goto fail;
6282 	}
6283 	device_printf(dev,
6284 	    "hw rev 0x%x, fw ver %s, address %s\n",
6285 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6286 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6287 
6288 	/* not all hardware can do 5GHz band */
6289 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6290 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6291 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6292 	IWM_UNLOCK(sc);
6293 
6294 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6295 	    ic->ic_channels);
6296 
6297 	/*
6298 	 * At this point we've committed - if we fail to do setup,
6299 	 * we now also have to tear down the net80211 state.
6300 	 */
6301 	ieee80211_ifattach(ic);
6302 	ic->ic_vap_create = iwm_vap_create;
6303 	ic->ic_vap_delete = iwm_vap_delete;
6304 	ic->ic_raw_xmit = iwm_raw_xmit;
6305 	ic->ic_node_alloc = iwm_node_alloc;
6306 	ic->ic_scan_start = iwm_scan_start;
6307 	ic->ic_scan_end = iwm_scan_end;
6308 	ic->ic_update_mcast = iwm_update_mcast;
6309 	ic->ic_getradiocaps = iwm_init_channel_map;
6310 	ic->ic_set_channel = iwm_set_channel;
6311 	ic->ic_scan_curchan = iwm_scan_curchan;
6312 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6313 	ic->ic_wme.wme_update = iwm_wme_update;
6314 	ic->ic_parent = iwm_parent;
6315 	ic->ic_transmit = iwm_transmit;
6316 	iwm_radiotap_attach(sc);
6317 	if (bootverbose)
6318 		ieee80211_announce(ic);
6319 
6320 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6321 	    "<-%s\n", __func__);
6322 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6323 
6324 	return;
6325 fail:
6326 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6327 	iwm_detach_local(sc, 0);
6328 }
6329 
6330 /*
6331  * Attach the interface to 802.11 radiotap.
6332  */
6333 static void
iwm_radiotap_attach(struct iwm_softc * sc)6334 iwm_radiotap_attach(struct iwm_softc *sc)
6335 {
6336         struct ieee80211com *ic = &sc->sc_ic;
6337 
6338 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6339 	    "->%s begin\n", __func__);
6340         ieee80211_radiotap_attach(ic,
6341             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6342                 IWM_TX_RADIOTAP_PRESENT,
6343             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6344                 IWM_RX_RADIOTAP_PRESENT);
6345 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6346 	    "->%s end\n", __func__);
6347 }
6348 
6349 static struct ieee80211vap *
iwm_vap_create(struct ieee80211com * ic,const char name[IFNAMSIZ],int unit,enum ieee80211_opmode opmode,int flags,const uint8_t bssid[IEEE80211_ADDR_LEN],const uint8_t mac[IEEE80211_ADDR_LEN])6350 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6351     enum ieee80211_opmode opmode, int flags,
6352     const uint8_t bssid[IEEE80211_ADDR_LEN],
6353     const uint8_t mac[IEEE80211_ADDR_LEN])
6354 {
6355 	struct iwm_vap *ivp;
6356 	struct ieee80211vap *vap;
6357 
6358 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6359 		return NULL;
6360 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6361 	vap = &ivp->iv_vap;
6362 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6363 	vap->iv_bmissthreshold = 10;            /* override default */
6364 	/* Override with driver methods. */
6365 	ivp->iv_newstate = vap->iv_newstate;
6366 	vap->iv_newstate = iwm_newstate;
6367 
6368 	ivp->id = IWM_DEFAULT_MACID;
6369 	ivp->color = IWM_DEFAULT_COLOR;
6370 
6371 	ivp->have_wme = FALSE;
6372 	ivp->ps_disabled = FALSE;
6373 
6374 	ieee80211_ratectl_init(vap);
6375 	/* Complete setup. */
6376 	ieee80211_vap_attach(vap, ieee80211_media_change,
6377 	    ieee80211_media_status, mac);
6378 	ic->ic_opmode = opmode;
6379 
6380 	return vap;
6381 }
6382 
6383 static void
iwm_vap_delete(struct ieee80211vap * vap)6384 iwm_vap_delete(struct ieee80211vap *vap)
6385 {
6386 	struct iwm_vap *ivp = IWM_VAP(vap);
6387 
6388 	ieee80211_ratectl_deinit(vap);
6389 	ieee80211_vap_detach(vap);
6390 	free(ivp, M_80211_VAP);
6391 }
6392 
6393 static void
iwm_xmit_queue_drain(struct iwm_softc * sc)6394 iwm_xmit_queue_drain(struct iwm_softc *sc)
6395 {
6396 	struct mbuf *m;
6397 	struct ieee80211_node *ni;
6398 
6399 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6400 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6401 		ieee80211_free_node(ni);
6402 		m_freem(m);
6403 	}
6404 }
6405 
6406 static void
iwm_scan_start(struct ieee80211com * ic)6407 iwm_scan_start(struct ieee80211com *ic)
6408 {
6409 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6410 	struct iwm_softc *sc = ic->ic_softc;
6411 	int error;
6412 
6413 	IWM_LOCK(sc);
6414 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6415 		/* This should not be possible */
6416 		device_printf(sc->sc_dev,
6417 		    "%s: Previous scan not completed yet\n", __func__);
6418 	}
6419 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6420 		error = iwm_umac_scan(sc);
6421 	else
6422 		error = iwm_lmac_scan(sc);
6423 	if (error != 0) {
6424 		device_printf(sc->sc_dev, "could not initiate scan\n");
6425 		IWM_UNLOCK(sc);
6426 		ieee80211_cancel_scan(vap);
6427 	} else {
6428 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6429 		iwm_led_blink_start(sc);
6430 		IWM_UNLOCK(sc);
6431 	}
6432 }
6433 
6434 static void
iwm_scan_end(struct ieee80211com * ic)6435 iwm_scan_end(struct ieee80211com *ic)
6436 {
6437 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6438 	struct iwm_softc *sc = ic->ic_softc;
6439 
6440 	IWM_LOCK(sc);
6441 	iwm_led_blink_stop(sc);
6442 	if (vap->iv_state == IEEE80211_S_RUN)
6443 		iwm_led_enable(sc);
6444 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6445 		/*
6446 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6447 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6448 		 * taskqueue.
6449 		 */
6450 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6451 		iwm_scan_stop_wait(sc);
6452 	}
6453 	IWM_UNLOCK(sc);
6454 
6455 	/*
6456 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6457 	 * This is to make sure that it won't call ieee80211_scan_done
6458 	 * when we have already started the next scan.
6459 	 */
6460 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6461 }
6462 
6463 static void
iwm_update_mcast(struct ieee80211com * ic)6464 iwm_update_mcast(struct ieee80211com *ic)
6465 {
6466 }
6467 
6468 static void
iwm_set_channel(struct ieee80211com * ic)6469 iwm_set_channel(struct ieee80211com *ic)
6470 {
6471 }
6472 
6473 static void
iwm_scan_curchan(struct ieee80211_scan_state * ss,unsigned long maxdwell)6474 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6475 {
6476 }
6477 
6478 static void
iwm_scan_mindwell(struct ieee80211_scan_state * ss)6479 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6480 {
6481 }
6482 
6483 void
iwm_init_task(void * arg1)6484 iwm_init_task(void *arg1)
6485 {
6486 	struct iwm_softc *sc = arg1;
6487 
6488 	IWM_LOCK(sc);
6489 	while (sc->sc_flags & IWM_FLAG_BUSY)
6490 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6491 	sc->sc_flags |= IWM_FLAG_BUSY;
6492 	iwm_stop(sc);
6493 	if (sc->sc_ic.ic_nrunning > 0)
6494 		iwm_init(sc);
6495 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6496 	wakeup(&sc->sc_flags);
6497 	IWM_UNLOCK(sc);
6498 }
6499 
6500 static int
iwm_resume(device_t dev)6501 iwm_resume(device_t dev)
6502 {
6503 	struct iwm_softc *sc = device_get_softc(dev);
6504 	int do_reinit = 0;
6505 
6506 	/*
6507 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6508 	 * PCI Tx retries from interfering with C3 CPU state.
6509 	 */
6510 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6511 
6512 	if (!sc->sc_attached)
6513 		return 0;
6514 
6515 	iwm_init_task(device_get_softc(dev));
6516 
6517 	IWM_LOCK(sc);
6518 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6519 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6520 		do_reinit = 1;
6521 	}
6522 	IWM_UNLOCK(sc);
6523 
6524 	if (do_reinit)
6525 		ieee80211_resume_all(&sc->sc_ic);
6526 
6527 	return 0;
6528 }
6529 
6530 static int
iwm_suspend(device_t dev)6531 iwm_suspend(device_t dev)
6532 {
6533 	int do_stop = 0;
6534 	struct iwm_softc *sc = device_get_softc(dev);
6535 
6536 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6537 
6538 	if (!sc->sc_attached)
6539 		return (0);
6540 
6541 	ieee80211_suspend_all(&sc->sc_ic);
6542 
6543 	if (do_stop) {
6544 		IWM_LOCK(sc);
6545 		iwm_stop(sc);
6546 		sc->sc_flags |= IWM_FLAG_SCANNING;
6547 		IWM_UNLOCK(sc);
6548 	}
6549 
6550 	return (0);
6551 }
6552 
6553 static int
iwm_detach_local(struct iwm_softc * sc,int do_net80211)6554 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6555 {
6556 	struct iwm_fw_info *fw = &sc->sc_fw;
6557 	device_t dev = sc->sc_dev;
6558 	int i;
6559 
6560 	if (!sc->sc_attached)
6561 		return 0;
6562 	sc->sc_attached = 0;
6563 	if (do_net80211) {
6564 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6565 	}
6566 	iwm_stop_device(sc);
6567 	taskqueue_drain_all(sc->sc_tq);
6568 	taskqueue_free(sc->sc_tq);
6569 	if (do_net80211) {
6570 		IWM_LOCK(sc);
6571 		iwm_xmit_queue_drain(sc);
6572 		IWM_UNLOCK(sc);
6573 		ieee80211_ifdetach(&sc->sc_ic);
6574 	}
6575 	callout_drain(&sc->sc_led_blink_to);
6576 	callout_drain(&sc->sc_watchdog_to);
6577 
6578 	iwm_phy_db_free(sc->sc_phy_db);
6579 	sc->sc_phy_db = NULL;
6580 
6581 	iwm_free_nvm_data(sc->nvm_data);
6582 
6583 	/* Free descriptor rings */
6584 	iwm_free_rx_ring(sc, &sc->rxq);
6585 	for (i = 0; i < nitems(sc->txq); i++)
6586 		iwm_free_tx_ring(sc, &sc->txq[i]);
6587 
6588 	/* Free firmware */
6589 	if (fw->fw_fp != NULL)
6590 		iwm_fw_info_free(fw);
6591 
6592 	/* Free scheduler */
6593 	iwm_dma_contig_free(&sc->sched_dma);
6594 	iwm_dma_contig_free(&sc->ict_dma);
6595 	iwm_dma_contig_free(&sc->kw_dma);
6596 	iwm_dma_contig_free(&sc->fw_dma);
6597 
6598 	iwm_free_fw_paging(sc);
6599 
6600 	/* Finished with the hardware - detach things */
6601 	iwm_pci_detach(dev);
6602 
6603 	if (sc->sc_notif_wait != NULL) {
6604 		iwm_notification_wait_free(sc->sc_notif_wait);
6605 		sc->sc_notif_wait = NULL;
6606 	}
6607 
6608 	IWM_LOCK_DESTROY(sc);
6609 
6610 	return (0);
6611 }
6612 
6613 static int
iwm_detach(device_t dev)6614 iwm_detach(device_t dev)
6615 {
6616 	struct iwm_softc *sc = device_get_softc(dev);
6617 
6618 	return (iwm_detach_local(sc, 1));
6619 }
6620 
6621 static device_method_t iwm_pci_methods[] = {
6622         /* Device interface */
6623         DEVMETHOD(device_probe,         iwm_probe),
6624         DEVMETHOD(device_attach,        iwm_attach),
6625         DEVMETHOD(device_detach,        iwm_detach),
6626         DEVMETHOD(device_suspend,       iwm_suspend),
6627         DEVMETHOD(device_resume,        iwm_resume),
6628 
6629         DEVMETHOD_END
6630 };
6631 
6632 static driver_t iwm_pci_driver = {
6633         "iwm",
6634         iwm_pci_methods,
6635         sizeof (struct iwm_softc)
6636 };
6637 
6638 DRIVER_MODULE(iwm, pci, iwm_pci_driver, NULL, NULL);
6639 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6640     iwm_devices, nitems(iwm_devices));
6641 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6642 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6643 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6644