/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
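/* Return a pointer of type 't' into an mbuf's data area, 'off' bytes in. */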
#define mtodoff(m, t, off)	((t)((m)->m_data + (off)))

const uint8_t iwm_nvm_channels[] = {
    /* 2.4 GHz */
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 5 GHz */
    36, 40, 44, 48, 52, 56, 60, 64,
    100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
    /* 2.4 GHz */
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 5 GHz */
    36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
    96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

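/*
 * Rate table.  'rate' is in units of 500 kbit/s, so 2 is 1 Mbit/s and
 * 108 is 54 Mbit/s.  'plcp' is the legacy PLCP signal value and
 * 'ht_plcp' the HT MCS PLCP value; a rate that exists in only one of
 * the two sets carries IWM_RATE_INVM_PLCP or
 * IWM_RATE_HT_SISO_MCS_INV_PLCP in the slot that does not apply.
 */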
const struct iwm_rate {
    uint16_t rate;
    uint8_t plcp;
    uint8_t ht_plcp;
} iwm_rates[] = {
    /* Legacy */               /* HT */
    {   2, IWM_RATE_1M_PLCP,   IWM_RATE_HT_SISO_MCS_INV_PLCP },
    {   4, IWM_RATE_2M_PLCP,   IWM_RATE_HT_SISO_MCS_INV_PLCP },
    {  11, IWM_RATE_5M_PLCP,   IWM_RATE_HT_SISO_MCS_INV_PLCP },
    {  22, IWM_RATE_11M_PLCP,  IWM_RATE_HT_SISO_MCS_INV_PLCP },
    {  12, IWM_RATE_6M_PLCP,   IWM_RATE_HT_SISO_MCS_0_PLCP },
    {  18, IWM_RATE_9M_PLCP,   IWM_RATE_HT_SISO_MCS_INV_PLCP },
    {  24, IWM_RATE_12M_PLCP,  IWM_RATE_HT_SISO_MCS_1_PLCP },
    {  26, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_8_PLCP },
    {  36, IWM_RATE_18M_PLCP,  IWM_RATE_HT_SISO_MCS_2_PLCP },
    {  48, IWM_RATE_24M_PLCP,  IWM_RATE_HT_SISO_MCS_3_PLCP },
    {  52, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_9_PLCP },
    {  72, IWM_RATE_36M_PLCP,  IWM_RATE_HT_SISO_MCS_4_PLCP },
    {  78, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_10_PLCP },
    {  96, IWM_RATE_48M_PLCP,  IWM_RATE_HT_SISO_MCS_5_PLCP },
    { 104, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_11_PLCP },
    { 108, IWM_RATE_54M_PLCP,  IWM_RATE_HT_SISO_MCS_6_PLCP },
    { 128, IWM_RATE_INVM_PLCP, IWM_RATE_HT_SISO_MCS_7_PLCP },
    { 156, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_12_PLCP },
    { 208, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_13_PLCP },
    { 234, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_14_PLCP },
    { 260, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_)	((_i_) >= 12 && (_i_) != 22)
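/*
 * Note the distinction: the IWM_RIDX_* macros take an index into
 * iwm_rates[], while IWM_RVAL_IS_OFDM() takes a rate value in 500 kbit/s
 * units (12, i.e. 6 Mbit/s, is the lowest OFDM rate; 22, i.e. 11 Mbit/s,
 * is CCK and therefore excluded).
 */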

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_mcs2ridx[] = {
    IWM_RATE_MCS_0_INDEX,
    IWM_RATE_MCS_1_INDEX,
    IWM_RATE_MCS_2_INDEX,
    IWM_RATE_MCS_3_INDEX,
    IWM_RATE_MCS_4_INDEX,
    IWM_RATE_MCS_5_INDEX,
    IWM_RATE_MCS_6_INDEX,
    IWM_RATE_MCS_7_INDEX,
    IWM_RATE_MCS_8_INDEX,
    IWM_RATE_MCS_9_INDEX,
    IWM_RATE_MCS_10_INDEX,
    IWM_RATE_MCS_11_INDEX,
    IWM_RATE_MCS_12_INDEX,
    IWM_RATE_MCS_13_INDEX,
    IWM_RATE_MCS_14_INDEX,
    IWM_RATE_MCS_15_INDEX,
};

struct iwm_nvm_section {
    uint16_t length;
    uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
    int valid;
    uint32_t scd_base_addr;
};

static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int iwm_firmware_store_section(struct iwm_softc *,
    enum iwm_ucode_type, const uint8_t *, size_t);
static int iwm_set_default_calib(struct iwm_softc *, const void *);
static void iwm_fw_info_free(struct iwm_fw_info *);
static int iwm_read_firmware(struct iwm_softc *);
static int iwm_alloc_fwmem(struct iwm_softc *);
static int iwm_alloc_sched(struct iwm_softc *);
static int iwm_alloc_kw(struct iwm_softc *);
static int iwm_alloc_ict(struct iwm_softc *);
static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    int);
static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_enable_interrupts(struct iwm_softc *);
static void iwm_restore_interrupts(struct iwm_softc *);
static void iwm_disable_interrupts(struct iwm_softc *);
static void iwm_ict_reset(struct iwm_softc *);
static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void iwm_stop_device(struct iwm_softc *);
static void iwm_nic_config(struct iwm_softc *);
static int iwm_nic_rx_init(struct iwm_softc *);
static int iwm_nic_tx_init(struct iwm_softc *);
static int iwm_nic_init(struct iwm_softc *);
static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    uint16_t, uint8_t *, uint16_t *);
static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    uint16_t *, uint32_t);
static uint32_t iwm_eeprom_channel_flags(uint16_t);
static void iwm_add_channel_band(struct iwm_softc *,
    struct ieee80211_channel[], int, int *, int, size_t,
    const uint8_t[]);
static void iwm_init_channel_map(struct ieee80211com *, int, int *,
    struct ieee80211_channel[]);
static struct iwm_nvm_data *
    iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
        const uint16_t *, const uint16_t *, const uint16_t *,
        const uint16_t *, const uint16_t *);
static void iwm_free_nvm_data(struct iwm_nvm_data *);
static void iwm_set_hw_address_family_8000(struct iwm_softc *,
    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
    const uint16_t *);
static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
    const uint16_t *);
static int iwm_get_n_hw_addrs(const struct iwm_softc *,
    const uint16_t *);
static void iwm_set_radio_cfg(const struct iwm_softc *,
    struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
    iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int iwm_nvm_init(struct iwm_softc *);
static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
    const struct iwm_fw_desc *);
static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
    bus_addr_t, uint32_t);
static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *,
    const struct iwm_fw_img *, int, int *);
static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
    const struct iwm_fw_img *, int, int *);
static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
    const struct iwm_fw_img *);
static int iwm_pcie_load_given_ucode(struct iwm_softc *,
    const struct iwm_fw_img *);
static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int iwm_load_ucode_wait_alive(struct iwm_softc *,
    enum iwm_ucode_type);
static int iwm_run_init_ucode(struct iwm_softc *, int);
static int iwm_config_ltr(struct iwm_softc *sc);
static int iwm_rx_addbuf(struct iwm_softc *, int, int);
static void iwm_rx_rx_phy_cmd(struct iwm_softc *,
    struct iwm_rx_packet *);
static int iwm_get_noise(struct iwm_softc *,
    const struct iwm_statistics_rx_non_phy *);
static void iwm_handle_rx_statistics(struct iwm_softc *,
    struct iwm_rx_packet *);
static bool iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
    uint32_t, bool);
static int iwm_rx_tx_cmd_single(struct iwm_softc *,
    struct iwm_rx_packet *, struct iwm_node *);
static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    uint16_t);
#endif
static const struct iwm_rate *
    iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
        struct mbuf *, struct iwm_tx_cmd *);
static int iwm_tx(struct iwm_softc *, struct mbuf *,
    struct ieee80211_node *, int);
static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
    const struct ieee80211_bpf_params *);
static int iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
    iwm_node_alloc(struct ieee80211vap *,
        const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t iwm_rate_from_ucode_rate(uint32_t);
static int iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwm_endscan_cb(void *, int);
static int iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int iwm_init_hw(struct iwm_softc *);
static void iwm_init(struct iwm_softc *);
static void iwm_start(struct iwm_softc *);
static void iwm_stop(struct iwm_softc *);
static void iwm_watchdog(void *);
static void iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
    iwm_desc_lookup(uint32_t);
static void iwm_nic_error(struct iwm_softc *);
static void iwm_nic_umac_error(struct iwm_softc *);
#endif
static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void iwm_notif_intr(struct iwm_softc *);
static void iwm_intr(void *);
static int iwm_attach(device_t);
static int iwm_is_valid_ether_addr(uint8_t *);
static void iwm_preinit(void *);
static int iwm_detach_local(struct iwm_softc *sc, int);
static void iwm_init_task(void *);
static void iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
    iwm_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int,
        enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN],
        const uint8_t [IEEE80211_ADDR_LEN]);
static void iwm_vap_delete(struct ieee80211vap *);
static void iwm_xmit_queue_drain(struct iwm_softc *);
static void iwm_scan_start(struct ieee80211com *);
static void iwm_scan_end(struct ieee80211com *);
static void iwm_update_mcast(struct ieee80211com *);
static void iwm_set_channel(struct ieee80211com *);
static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwm_scan_mindwell(struct ieee80211_scan_state *);
static int iwm_detach(device_t);

static int iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
    const struct iwm_fw_cscheme_list *l = (const void *)data;

    if (dlen < sizeof(*l) ||
        dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
        return EINVAL;

    /* we don't actually store anything for now, always use s/w crypto */

    return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
    struct iwm_fw_img *fws;
    struct iwm_fw_desc *fwone;

    if (type >= IWM_UCODE_TYPE_MAX)
        return EINVAL;
    if (dlen < sizeof(uint32_t))
        return EINVAL;

    fws = &sc->sc_fw.img[type];
    if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
        return EINVAL;

    fwone = &fws->sec[fws->fw_count];

    /* The first 32 bits are the device load offset. */
    memcpy(&fwone->offset, data, sizeof(uint32_t));

    /* The rest is the section data itself. */
    fwone->data = data + sizeof(uint32_t);
    fwone->len = dlen - sizeof(uint32_t);

    fws->fw_count++;

    return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
    uint32_t ucode_type;
    struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
    const struct iwm_tlv_calib_data *def_calib = data;
    uint32_t ucode_type = le32toh(def_calib->ucode_type);

    if (ucode_type >= IWM_UCODE_TYPE_MAX) {
        device_printf(sc->sc_dev,
            "Wrong ucode_type %u for default "
            "calibration.\n", ucode_type);
        return EINVAL;
    }

    sc->sc_default_calib[ucode_type].flow_trigger =
        def_calib->calib.flow_trigger;
    sc->sc_default_calib[ucode_type].event_trigger =
        def_calib->calib.event_trigger;

    return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
    const struct iwm_ucode_api *ucode_api = (const void *)data;
    uint32_t api_index = le32toh(ucode_api->api_index);
    uint32_t api_flags = le32toh(ucode_api->api_flags);
    int i;

    if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
        device_printf(sc->sc_dev,
            "api flags index %d larger than supported by driver\n",
            api_index);
        /* don't return an error so we can load FW that has more bits */
        return 0;
    }

    for (i = 0; i < 32; i++) {
        if (api_flags & (1U << i))
            setbit(capa->enabled_api, i + 32 * api_index);
    }

    return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
    const struct iwm_ucode_capa *ucode_capa = (const void *)data;
    uint32_t api_index = le32toh(ucode_capa->api_index);
    uint32_t api_flags = le32toh(ucode_capa->api_capa);
    int i;

    if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
        device_printf(sc->sc_dev,
            "capa flags index %d larger than supported by driver\n",
            api_index);
        /* don't return an error so we can load FW that has more bits */
        return 0;
    }

    for (i = 0; i < 32; i++) {
        if (api_flags & (1U << i))
            setbit(capa->enabled_capa, i + 32 * api_index);
    }

    return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
    firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
    fw->fw_fp = NULL;
    memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
    struct iwm_fw_info *fw = &sc->sc_fw;
    const struct iwm_tlv_ucode_header *uhdr;
    const struct iwm_ucode_tlv *tlv;
    struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
    enum iwm_ucode_tlv_type tlv_type;
    const struct firmware *fwp;
    const uint8_t *data;
    uint32_t tlv_len;
    uint32_t usniffer_img;
    const uint8_t *tlv_data;
    uint32_t paging_mem_size;
    int num_of_cpus;
    int error = 0;
    size_t len;

    /*
     * Load firmware into driver memory.
     * fw_fp will be set.
     */
    fwp = firmware_get(sc->cfg->fw_name);
    if (fwp == NULL) {
        /* firmware_get() gives no errno; don't return success here */
        error = ENOENT;
        device_printf(sc->sc_dev,
            "could not read firmware %s (error %d)\n",
            sc->cfg->fw_name, error);
        goto out;
    }
    fw->fw_fp = fwp;

    /* (Re-)Initialize default values. */
    capa->flags = 0;
    capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
    capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
    memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
    memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
    memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

    /*
     * Parse firmware contents.
     */
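    /*
     * A TLV image starts with a header whose first 32-bit word is zero
     * and whose magic field is IWM_TLV_UCODE_MAGIC; everything after
     * the header is a sequence of TLVs, each carrying a le32 type, a
     * le32 length and 'length' payload bytes padded to a 4-byte
     * boundary, which the loop below walks.
     */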

    uhdr = (const void *)fw->fw_fp->data;
    if (*(const uint32_t *)fw->fw_fp->data != 0
        || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
        device_printf(sc->sc_dev, "invalid firmware %s\n",
            sc->cfg->fw_name);
        error = EINVAL;
        goto out;
    }

    snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
        IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
        IWM_UCODE_MINOR(le32toh(uhdr->ver)),
        IWM_UCODE_API(le32toh(uhdr->ver)));
    data = uhdr->data;
    len = fw->fw_fp->datasize - sizeof(*uhdr);

    while (len >= sizeof(*tlv)) {
        len -= sizeof(*tlv);
        tlv = (const void *)data;

        tlv_len = le32toh(tlv->length);
        tlv_type = le32toh(tlv->type);
        tlv_data = tlv->data;

        if (len < tlv_len) {
            device_printf(sc->sc_dev,
                "firmware too short: %zu bytes\n",
                len);
            error = EINVAL;
            goto parse_out;
        }
        len -= roundup2(tlv_len, 4);
        data += sizeof(*tlv) + roundup2(tlv_len, 4);

        switch ((int)tlv_type) {
        case IWM_UCODE_TLV_PROBE_MAX_LEN:
            if (tlv_len != sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            capa->max_probe_length =
                le32_to_cpup((const uint32_t *)tlv_data);
            /* limit it to something sensible */
            if (capa->max_probe_length >
                IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
                IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
                    "ridiculous\n", __func__);
                error = EINVAL;
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_PAN:
            if (tlv_len) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
            break;
        case IWM_UCODE_TLV_FLAGS:
            if (tlv_len < sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            if (tlv_len % sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            /*
             * Apparently there can be many flags, but Linux driver
             * parses only the first one, and so do we.
             *
             * XXX: why does this override IWM_UCODE_TLV_PAN?
             * Intentional or a bug?  Observations from
             * current firmware file:
             *  1) TLV_PAN is parsed first
             *  2) TLV_FLAGS contains TLV_FLAGS_PAN
             * ==> this resets TLV_PAN to itself... hnnnk
             */
            capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
            break;
        case IWM_UCODE_TLV_CSCHEME:
            if ((error = iwm_store_cscheme(sc,
                tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: iwm_store_cscheme(): returned %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_NUM_OF_CPU:
            if (tlv_len != sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
            if (num_of_cpus == 2) {
                fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
                    TRUE;
                fw->img[IWM_UCODE_INIT].is_dual_cpus =
                    TRUE;
                fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
                    TRUE;
            } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
                device_printf(sc->sc_dev,
                    "%s: Driver supports only 1 or 2 CPUs\n",
                    __func__);
                error = EINVAL;
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_RT:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_INIT:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_WOWLAN:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_DEF_CALIB:
            if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
                    __func__, tlv_len,
                    sizeof(struct iwm_tlv_calib_data));
                error = EINVAL;
                goto parse_out;
            }
            if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: iwm_set_default_calib() failed: %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_PHY_SKU:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
                    __func__, tlv_len);
                goto parse_out;
            }
            sc->sc_fw.phy_config =
                le32_to_cpup((const uint32_t *)tlv_data);
            sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
                IWM_FW_PHY_CFG_TX_CHAIN) >>
                IWM_FW_PHY_CFG_TX_CHAIN_POS;
            sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
                IWM_FW_PHY_CFG_RX_CHAIN) >>
                IWM_FW_PHY_CFG_RX_CHAIN_POS;
            break;

        case IWM_UCODE_TLV_API_CHANGES_SET: {
            if (tlv_len != sizeof(struct iwm_ucode_api)) {
                error = EINVAL;
                goto parse_out;
            }
            if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
                error = EINVAL;
                goto parse_out;
            }
            break;
        }

        case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
            if (tlv_len != sizeof(struct iwm_ucode_capa)) {
                error = EINVAL;
                goto parse_out;
            }
            if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
                error = EINVAL;
                goto parse_out;
            }
            break;
        }

        case IWM_UCODE_TLV_CMD_VERSIONS:
        case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
        case IWM_UCODE_TLV_FW_GSCAN_CAPA:
            /* ignore, not used by current driver */
            break;

        case IWM_UCODE_TLV_SEC_RT_USNIFFER:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_REGULAR_USNIFFER, tlv_data,
                tlv_len)) != 0)
                goto parse_out;
            break;

        case IWM_UCODE_TLV_PAGING:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                goto parse_out;
            }
            paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

            IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                "%s: Paging: paging enabled (size = %u bytes)\n",
                __func__, paging_mem_size);
            if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
                device_printf(sc->sc_dev,
                    "%s: Paging: driver supports up to %u bytes for paging image\n",
                    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
                error = EINVAL;
                goto out;
            }
            if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
                device_printf(sc->sc_dev,
                    "%s: Paging: image size is not a multiple of %u\n",
                    __func__, IWM_FW_PAGING_SIZE);
                error = EINVAL;
                goto out;
            }

            sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
                paging_mem_size;
            usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
            sc->sc_fw.img[usniffer_img].paging_mem_size =
                paging_mem_size;
            break;

        case IWM_UCODE_TLV_N_SCAN_CHANNELS:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                goto parse_out;
            }
            capa->n_scan_channels =
                le32_to_cpup((const uint32_t *)tlv_data);
            break;

        case IWM_UCODE_TLV_FW_VERSION:
            if (tlv_len != sizeof(uint32_t) * 3) {
                error = EINVAL;
                goto parse_out;
            }
            snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
                "%u.%u.%u",
                le32toh(((const uint32_t *)tlv_data)[0]),
                le32toh(((const uint32_t *)tlv_data)[1]),
                le32toh(((const uint32_t *)tlv_data)[2]));
            break;

        case IWM_UCODE_TLV_FW_MEM_SEG:
            break;

        default:
            device_printf(sc->sc_dev,
                "%s: unknown firmware section %d, abort\n",
                __func__, tlv_type);
            error = EINVAL;
            goto parse_out;
        }
    }

    KASSERT(error == 0, ("unhandled error"));

parse_out:
    if (error) {
        device_printf(sc->sc_dev, "firmware parse error %d, "
            "section type %d\n", error, tlv_type);
    }

out:
    if (error) {
        if (fw->fw_fp != NULL)
            iwm_fw_info_free(fw);
    }

    return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
    /* Must be aligned on a 16-byte boundary. */
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
        IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings. not used? */
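/*
 * The area allocated here holds the per-queue byte count tables
 * (struct iwm_agn_scd_bc_tbl) that the TX scheduler can read; this
 * driver allocates and maps them but never fills them in (note the
 * #if 0'd iwm_update_sched() prototype above), hence the "not used?"
 * remark.
 */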
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
    /* TX scheduler rings must be aligned on a 1KB boundary. */
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
        nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card. see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
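/*
 * The ICT is an IWM_ICT_SIZE-byte DMA table, aligned to its own size,
 * into which the NIC writes 32-bit interrupt cause entries; when
 * IWM_FLAG_USE_ICT is set the interrupt handler reads causes from this
 * table instead of the IWM_CSR_INT register.  Its address is programmed
 * in 4 KB units, hence the IWM_ICT_PADDR_SHIFT shift in iwm_ict_reset().
 */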
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
        IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    bus_size_t size;
    size_t descsz;
    int count, i, error;

    ring->cur = 0;
    if (sc->cfg->mqrx_supported) {
        count = IWM_RX_MQ_RING_COUNT;
        descsz = sizeof(uint64_t);
    } else {
        count = IWM_RX_LEGACY_RING_COUNT;
        descsz = sizeof(uint32_t);
    }

    /* Allocate RX descriptors (256-byte aligned). */
    size = count * descsz;
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
        256);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate RX ring DMA memory\n");
        goto fail;
    }
    ring->desc = ring->free_desc_dma.vaddr;

    /* Allocate RX status area (16-byte aligned). */
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
        sizeof(*ring->stat), 16);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate RX status DMA memory\n");
        goto fail;
    }
    ring->stat = ring->stat_dma.vaddr;

    if (sc->cfg->mqrx_supported) {
        size = count * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
            size, 256);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "could not allocate RX ring DMA memory\n");
            goto fail;
        }
    }

    /* Create RX buffer DMA tag. */
    error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: could not create RX buf DMA tag, error %d\n",
            __func__, error);
        goto fail;
    }

    /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
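    /*
     * The spare map lets iwm_rx_addbuf() load a replacement mbuf
     * before giving up an old mapping, so a failed allocation never
     * leaves an RX slot without a mapped buffer.
     */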
    error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: could not create RX buf DMA map, error %d\n",
            __func__, error);
        goto fail;
    }

    /*
     * Allocate and map RX buffers.
     */
    for (i = 0; i < count; i++) {
        struct iwm_rx_data *data = &ring->data[i];
        error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "%s: could not create RX buf DMA map, error %d\n",
                __func__, error);
            goto fail;
        }
        data->m = NULL;

        if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
            goto fail;
        }
    }
    return 0;

fail:	iwm_free_rx_ring(sc, ring);
    return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    /* Reset the ring state */
    ring->cur = 0;

    /*
     * The hw rx ring index in shared memory must also be cleared,
     * otherwise the discrepancy can cause reprocessing chaos.
     */
    if (sc->rxq.stat)
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    int count, i;

    iwm_dma_contig_free(&ring->free_desc_dma);
    iwm_dma_contig_free(&ring->stat_dma);
    iwm_dma_contig_free(&ring->used_desc_dma);

    count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
        IWM_RX_LEGACY_RING_COUNT;

    for (i = 0; i < count; i++) {
        struct iwm_rx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->map != NULL) {
            bus_dmamap_destroy(ring->data_dmat, data->map);
            data->map = NULL;
        }
    }
    if (ring->spare_map != NULL) {
        bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
        ring->spare_map = NULL;
    }
    if (ring->data_dmat != NULL) {
        bus_dma_tag_destroy(ring->data_dmat);
        ring->data_dmat = NULL;
    }
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
    bus_addr_t paddr;
    bus_size_t size;
    size_t maxsize;
    int nsegments;
    int i, error;

    ring->qid = qid;
    ring->queued = 0;
    ring->cur = 0;

    /* Allocate TX descriptors (256-byte aligned). */
    size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate TX ring DMA memory\n");
        goto fail;
    }
    ring->desc = ring->desc_dma.vaddr;

    /*
     * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
     * to allocate commands space for other rings.
     */
    if (qid > IWM_CMD_QUEUE)
        return 0;

    size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate TX cmd DMA memory\n");
        goto fail;
    }
    ring->cmd = ring->cmd_dma.vaddr;

    /* FW commands may require more mapped space than packets. */
    if (qid == IWM_CMD_QUEUE) {
        maxsize = IWM_RBUF_SIZE;
        nsegments = 1;
    } else {
        maxsize = MCLBYTES;
        nsegments = IWM_MAX_SCATTER - 2;
    }

    error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
        nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
    if (error != 0) {
        device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
        goto fail;
    }

    paddr = ring->cmd_dma.paddr;
    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        data->cmd_paddr = paddr;
        data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
            + offsetof(struct iwm_tx_cmd, scratch);
        paddr += sizeof(struct iwm_device_cmd);

        error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "could not create TX buf DMA map\n");
            goto fail;
        }
    }
    KASSERT(paddr == ring->cmd_dma.paddr + size,
        ("invalid physical address"));
    return 0;

fail:	iwm_free_tx_ring(sc, ring);
    return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
    int i;

    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
    }
    /* Clear TX descriptors. */
    memset(ring->desc, 0, ring->desc_dma.size);
    bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
        BUS_DMASYNC_PREWRITE);
    sc->qfullmsk &= ~(1 << ring->qid);
    ring->queued = 0;
    ring->cur = 0;

    if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
        iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
    int i;

    iwm_dma_contig_free(&ring->desc_dma);
    iwm_dma_contig_free(&ring->cmd_dma);

    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->map != NULL) {
            bus_dmamap_destroy(ring->data_dmat, data->map);
            data->map = NULL;
        }
    }
    if (ring->data_dmat != NULL) {
        bus_dma_tag_destroy(ring->data_dmat);
        ring->data_dmat = NULL;
    }
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
    sc->sc_intmask = IWM_CSR_INI_SET_MASK;
    IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
    IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
    /* disable interrupts */
    IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

    /* acknowledge all interrupts */
    IWM_WRITE(sc, IWM_CSR_INT, ~0);
    IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
    iwm_disable_interrupts(sc);

    /* Reset ICT table. */
    memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
    sc->ict_cur = 0;

    /* Set physical address of ICT table (4KB aligned). */
    IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
        IWM_CSR_DRAM_INT_TBL_ENABLE
        | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
        | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
        | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

    /* Switch to ICT interrupt mode in driver. */
    sc->sc_flags |= IWM_FLAG_USE_ICT;

    /* Re-enable interrupts. */
    IWM_WRITE(sc, IWM_CSR_INT, ~0);
    iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
    int chnl, qid;
    uint32_t mask = 0;

    /* tell the device to stop sending interrupts */
    iwm_disable_interrupts(sc);

    /*
     * FreeBSD-local: mark the first vap as not-uploaded,
     * so the next transition through auth/assoc
     * will correctly populate the MAC context.
     */
    if (vap) {
        struct iwm_vap *iv = IWM_VAP(vap);
        iv->phy_ctxt = NULL;
        iv->is_uploaded = 0;
    }
    sc->sc_firmware_state = 0;
    sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

    /* device going down, stop using ICT table */
    sc->sc_flags &= ~IWM_FLAG_USE_ICT;

    /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

    if (iwm_nic_lock(sc)) {
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Stop each Tx DMA channel */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
            IWM_WRITE(sc,
                IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
            mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
        }

        /* Wait for DMA channels to be idle */
        if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
            5000)) {
            device_printf(sc->sc_dev,
                "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
        }
        iwm_nic_unlock(sc);
    }
    iwm_pcie_rx_stop(sc);

    /* Stop RX ring. */
    iwm_reset_rx_ring(sc, &sc->rxq);

    /* Reset all TX rings. */
    for (qid = 0; qid < nitems(sc->txq); qid++)
        iwm_reset_tx_ring(sc, &sc->txq[qid]);

    if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
        /* Power-down device's busmaster DMA clocks */
        if (iwm_nic_lock(sc)) {
            iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
                IWM_APMG_CLK_VAL_DMA_CLK_RQT);
            iwm_nic_unlock(sc);
        }
        DELAY(5);
    }

    /* Make sure (redundant) we've released our request to stay awake */
    IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
        IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

    /* Stop the device, and put it in low power state */
    iwm_apm_stop(sc);

    /* stop and reset the on-board processor */
    IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
    DELAY(5000);

    /*
     * Upon stop, the APM issues an interrupt if HW RF kill is set.
     */
    iwm_disable_interrupts(sc);

    /*
     * Even if we stop the HW, we still want the RF kill
     * interrupt
     */
    iwm_enable_rfkill_int(sc);
    iwm_check_rfkill(sc);

    iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
    uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
    uint32_t reg_val = 0;
    uint32_t phy_config = iwm_get_phy_config(sc);

    radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
        IWM_FW_PHY_CFG_RADIO_TYPE_POS;
    radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
        IWM_FW_PHY_CFG_RADIO_STEP_POS;
    radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
        IWM_FW_PHY_CFG_RADIO_DASH_POS;

    /* SKU control */
    reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
        IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
    reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
        IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

    /* radio configuration */
    reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
    reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
    reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

    IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
        IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
        IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
        IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
        reg_val);

    IWM_DPRINTF(sc, IWM_DEBUG_RESET,
        "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
        radio_cfg_step, radio_cfg_dash);

    /*
     * W/A : NIC is stuck in a reset state after Early PCIe power off
     * (PCIe power is lost before PERST# is asserted), causing ME FW
     * to lose ownership and not being able to obtain it back.
     */
    if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
        iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
            IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
            ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
    }
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
    int enabled;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Stop RX DMA. */
    iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
    /* Disable RX used and free queue operation. */
    iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

    iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
        sc->rxq.free_desc_dma.paddr);
    iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
        sc->rxq.used_desc_dma.paddr);
    iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
        sc->rxq.stat_dma.paddr);
    iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
    iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
    iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

    /* We configure only queue 0 for now. */
    enabled = ((1 << 0) << 16) | (1 << 0);

    /* Enable RX DMA, 4KB buffer size. */
    iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
        IWM_RFH_DMA_EN_ENABLE_VAL |
        IWM_RFH_RXF_DMA_RB_SIZE_4K |
        IWM_RFH_RXF_DMA_MIN_RB_4_8 |
        IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
        IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

    /* Enable RX DMA snooping. */
    iwm_write_prph(sc, IWM_RFH_GEN_CFG,
        IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
        IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
        (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
        IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

    /* Enable the configured queue(s). */
    iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

    iwm_nic_unlock(sc);

    IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

    IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

    return (0);
}

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

    /* Stop Rx DMA */
    iwm_pcie_rx_stop(sc);

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* reset and flush pointers */
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

    /* Set physical address of RX ring (256-byte aligned). */
    IWM_WRITE(sc,
        IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
        sc->rxq.free_desc_dma.paddr >> 8);

    /* Set physical address of RX status (16-byte aligned). */
    IWM_WRITE(sc,
        IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

    /* Enable Rx DMA
     * XXX 5000 HW isn't supported by the iwm(4) driver.
     * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
     * the credit mechanism in 5000 HW RX FIFO
     * Direct rx interrupts to hosts
     * Rx buffer size 4 or 8k or 12k
     * RB timeout 0x10
     * 256 RBDs
     */
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
        IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
        IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |  /* HW bug */
        IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
        IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
        (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
        IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

    IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

    /* W/A for interrupt coalescing bug in 7260 and 3160 */
    if (sc->cfg->host_interrupt_operation_mode)
        IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

    iwm_nic_unlock(sc);

    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

    return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
    if (sc->cfg->mqrx_supported)
        return iwm_nic_rx_mq_init(sc);
    else
        return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
    int qid;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Deactivate TX scheduler. */
    iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

    /* Set physical address of "keep warm" page (16-byte aligned). */
    IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

    /* Initialize TX rings. */
    for (qid = 0; qid < nitems(sc->txq); qid++) {
        struct iwm_tx_ring *txq = &sc->txq[qid];

        /* Set physical address of TX ring (256-byte aligned). */
        IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
            txq->desc_dma.paddr >> 8);
        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "%s: loading ring %d descriptors (%p) at %lx\n",
            __func__,
            qid, txq->desc,
            (unsigned long) (txq->desc_dma.paddr >> 8));
    }

    iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
        IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
        IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

    iwm_nic_unlock(sc);

    return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
    int error;

    iwm_apm_init(sc);
    if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
        iwm_set_pwr(sc);

    iwm_nic_config(sc);

    if ((error = iwm_nic_rx_init(sc)) != 0)
        return error;

    /*
     * Ditto for TX, from iwn
     */
    if ((error = iwm_nic_tx_init(sc)) != 0)
        return error;

    IWM_DPRINTF(sc, IWM_DEBUG_RESET,
        "%s: shadow registers enabled\n", __func__);
    IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

    return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
    int qmsk;

    qmsk = 1 << qid;

    if (!iwm_nic_lock(sc)) {
        device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
            __func__, qid);
        return EBUSY;
    }

    IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

    if (qid == IWM_CMD_QUEUE) {
        /* Disable the scheduler. */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

        /* Stop the TX queue prior to configuration. */
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

        iwm_nic_unlock(sc);

        /* Disable aggregations for this queue. */
        iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

        if (!iwm_nic_lock(sc)) {
            device_printf(sc->sc_dev,
                "%s: cannot enable txq %d\n", __func__, qid);
            return EBUSY;
        }
        iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
        iwm_nic_unlock(sc);

        iwm_write_mem32(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
        /* Set scheduler window size and frame limit. */
        iwm_write_mem32(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
            sizeof(uint32_t),
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
            IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
            IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

        if (!iwm_nic_lock(sc)) {
            device_printf(sc->sc_dev,
                "%s: cannot enable txq %d\n", __func__, qid);
            return EBUSY;
        }
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
            IWM_SCD_QUEUE_STTS_REG_MSK);

        /* Enable the scheduler for this queue. */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
    } else {
        struct iwm_scd_txq_cfg_cmd cmd;
        int error;

        iwm_nic_unlock(sc);

        memset(&cmd, 0, sizeof(cmd));
        cmd.scd_queue = qid;
        cmd.enable = 1;
        cmd.sta_id = sta_id;
        cmd.tx_fifo = fifo;
        cmd.aggregate = 0;
        cmd.window = IWM_FRAME_LIMIT;

        error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
            sizeof(cmd), &cmd);
        if (error) {
            device_printf(sc->sc_dev,
                "cannot enable txq %d\n", qid);
            return error;
        }

        if (!iwm_nic_lock(sc))
            return EBUSY;
    }

    iwm_nic_unlock(sc);

    IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
        __func__, qid, fifo);

    return 0;
}

static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
    int error, chnl;

    int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
        IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

    if (!iwm_nic_lock(sc))
        return EBUSY;

    iwm_ict_reset(sc);

    sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
    if (scd_base_addr != 0 &&
        scd_base_addr != sc->scd_base_addr) {
        device_printf(sc->sc_dev,
            "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
            __func__, sc->scd_base_addr, scd_base_addr);
    }

    iwm_nic_unlock(sc);

    /* reset context data, TX status and translation data */
    error = iwm_write_mem(sc,
        sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
        NULL, clear_dwords);
    if (error)
        return EBUSY;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Set physical address of TX scheduler rings (1KB aligned). */
    iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

    iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

    iwm_nic_unlock(sc);

    /* enable command channel */
    error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
    if (error)
        return error;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

    /* Enable DMA channels. */
    for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
    }

    IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
        IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

    iwm_nic_unlock(sc);

    /* Enable L1-Active */
    if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
        iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
            IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
    }

    return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE	1
#define IWM_NVM_READ_OPCODE	0

/* load nvm chunk response */
enum {
    IWM_READ_NVM_CHUNK_SUCCEED = 0,
    IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1801
1802 static int
iwm_nvm_read_chunk(struct iwm_softc * sc,uint16_t section,uint16_t offset,uint16_t length,uint8_t * data,uint16_t * len)1803 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1804 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1805 {
1806 struct iwm_nvm_access_cmd nvm_access_cmd = {
1807 .offset = htole16(offset),
1808 .length = htole16(length),
1809 .type = htole16(section),
1810 .op_code = IWM_NVM_READ_OPCODE,
1811 };
1812 struct iwm_nvm_access_resp *nvm_resp;
1813 struct iwm_rx_packet *pkt;
1814 struct iwm_host_cmd cmd = {
1815 .id = IWM_NVM_ACCESS_CMD,
1816 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1817 .data = { &nvm_access_cmd, },
1818 };
1819 int ret, bytes_read, offset_read;
1820 uint8_t *resp_data;
1821
1822 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1823
1824 ret = iwm_send_cmd(sc, &cmd);
1825 if (ret) {
1826 device_printf(sc->sc_dev,
1827 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1828 return ret;
1829 }
1830
1831 pkt = cmd.resp_pkt;
1832
1833 /* Extract NVM response */
1834 nvm_resp = (void *)pkt->data;
1835 ret = le16toh(nvm_resp->status);
1836 bytes_read = le16toh(nvm_resp->length);
1837 offset_read = le16toh(nvm_resp->offset);
1838 resp_data = nvm_resp->data;
1839 if (ret) {
1840 if ((offset != 0) &&
1841 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1842 /*
1843 * meaning of NOT_VALID_ADDRESS:
1844 * driver try to read chunk from address that is
1845 * multiple of 2K and got an error since addr is empty.
1846 * meaning of (offset != 0): driver already
1847 * read valid data from another chunk so this case
1848 * is not an error.
1849 */
1850 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1851 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1852 offset);
1853 *len = 0;
1854 ret = 0;
1855 } else {
1856 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1857 "NVM access command failed with status %d\n", ret);
1858 ret = EIO;
1859 }
1860 goto exit;
1861 }
1862
1863 if (offset_read != offset) {
1864 device_printf(sc->sc_dev,
1865 "NVM ACCESS response with invalid offset %d\n",
1866 offset_read);
1867 ret = EINVAL;
1868 goto exit;
1869 }
1870
1871 if (bytes_read > length) {
1872 device_printf(sc->sc_dev,
1873 "NVM ACCESS response with too much data "
1874 "(%d bytes requested, %d bytes received)\n",
1875 length, bytes_read);
1876 ret = EINVAL;
1877 goto exit;
1878 }
1879
1880 /* Write data to NVM */
1881 memcpy(data + offset, resp_data, bytes_read);
1882 *len = bytes_read;
1883
1884 exit:
1885 iwm_free_resp(sc, &cmd);
1886 return ret;
1887 }
1888
1889 /*
1890 * Reads an NVM section completely.
1891 * NICs prior to 7000 family don't have a real NVM, but just read
1892 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1893 * by uCode, we need to manually check in this case that we don't
1894 * overflow and try to read more than the EEPROM size.
1895 * For 7000 family NICs, we supply the maximal size we can read, and
1896 * the uCode fills the response with as much data as we can,
1897 * without overflowing, so no check is needed.
1898 */
1899 static int
iwm_nvm_read_section(struct iwm_softc * sc,uint16_t section,uint8_t * data,uint16_t * len,uint32_t size_read)1900 iwm_nvm_read_section(struct iwm_softc *sc,
1901 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1902 {
1903 uint16_t seglen, length, offset = 0;
1904 int ret;
1905
1906 /* Set nvm section read length */
1907 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1908
1909 seglen = length;
1910
1911 /* Read the NVM until exhausted (reading less than requested) */
1912 while (seglen == length) {
1913 /* Check no memory assumptions fail and cause an overflow */
1914 if ((size_read + offset + length) >
1915 sc->cfg->eeprom_size) {
1916 device_printf(sc->sc_dev,
1917 "EEPROM size is too small for NVM\n");
1918 return ENOBUFS;
1919 }
1920
1921 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1922 if (ret) {
1923 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1924 "Cannot read NVM from section %d offset %d, length %d\n",
1925 section, offset, length);
1926 return ret;
1927 }
1928 offset += seglen;
1929 }
1930
1931 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1932 "NVM section %d read completed\n", section);
1933 *len = offset;
1934 return 0;
1935 }
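
/*
 * Worked example with hypothetical sizes: using the default 2K chunk
 * size, a 5000-byte section is fetched as chunks of 2048, 2048 and 904
 * bytes; the loop above stops once a chunk comes back shorter than
 * requested (seglen < length), leaving *len = 5000.
 */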

/*
 * BEGIN IWM_NVM_PARSE
 */

/* iwlwifi/iwl-nvm-parse.c */

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwm_eeprom_channel_flags(uint16_t ch_flags)
{
	uint32_t nflags;

	nflags = 0;
	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* Just in case. */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return (nflags);
}
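
/*
 * Example translation: NVM flags with IWM_NVM_CHANNEL_RADAR set and
 * IWM_NVM_CHANNEL_IBSS clear map to IEEE80211_CHAN_DFS |
 * IEEE80211_CHAN_NOADHOC; if IWM_NVM_CHANNEL_ACTIVE is clear as well,
 * IEEE80211_CHAN_PASSIVE is added on top.
 */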

static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}

static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}

static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from the MAO section.
		 * No byte swapping is required in the MAO section.
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
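
/*
 * Byte-order illustration (hypothetical register values): if
 * IWM_WFMP_MAC_ADDR_0 reads 0x00a1b2c3 and IWM_WFMP_MAC_ADDR_1 reads
 * 0x0000d4e5, the byte reversal above yields the address
 * 00:a1:b2:c3:d4:e5.
 */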

static int
iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
    const uint16_t *phy_sku)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_SKU);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
}

static int
iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
	else
		return le32_to_cpup((const uint32_t *)(nvm_sw +
		    IWM_NVM_VERSION_8000));
}

static int
iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
    const uint16_t *phy_sku)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
}

static int
iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
{
	int n_hw_addr;

	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));

	return n_hw_addr & IWM_N_HW_ADDR_MASK;
}

static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
    uint32_t radio_cfg)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}

static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
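
/*
 * "214365" illustration: for the MAC address a1:b2:c3:d4:e5:f6, the
 * pre-8000 NVM stores the bytes as b2 a1 d4 c3 f6 e5 (each 16-bit
 * word little endian), so the pairwise swap above recovers
 * a1:b2:c3:d4:e5:f6.
 */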

static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
    const uint16_t *nvm_calib, const uint16_t *mac_override,
    const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
		/* TODO: use IWL_NVM_EXT */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
		    IWM_NVM_LAR_OFFSET_8000_OLD :
		    IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
		    IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}

static void
iwm_free_nvm_data(struct iwm_nvm_data *data)
{
	if (data != NULL)
		free(data, M_DEVBUF);
}

static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	hw = (const uint16_t *)sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}

static int
iwm_nvm_init(struct iwm_softc *sc)
{
	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
	int i, ret, section;
	uint32_t size_read = 0;
	uint8_t *nvm_buffer, *temp;
	uint16_t len;

	memset(nvm_sections, 0, sizeof(nvm_sections));

	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
		return EINVAL;

	/* load NVM values from nic */
	/* Read From FW NVM */
	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");

	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!nvm_buffer)
		return ENOMEM;
	for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
		/* we override the constness for initial read */
		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
		    &len, size_read);
		if (ret)
			continue;
		size_read += len;
		temp = malloc(len, M_DEVBUF, M_NOWAIT);
		if (!temp) {
			ret = ENOMEM;
			break;
		}
		memcpy(temp, nvm_buffer, len);

		nvm_sections[section].data = temp;
		nvm_sections[section].length = len;
	}
	if (!size_read)
		device_printf(sc->sc_dev, "OTP is blank\n");
	free(nvm_buffer, M_DEVBUF);

	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
	if (!sc->nvm_data)
		return EINVAL;
	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
	    "nvm version = %x\n", sc->nvm_data->nvm_version);

	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
		if (nvm_sections[i].data != NULL)
			free(nvm_sections[i].data, M_DEVBUF);
	}

	return 0;
}

static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
    const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: [%d] uCode section being loaded...\n",
	    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
		    copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}

/*
 * ucode
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    bus_addr_t phy_addr, uint32_t byte_cnt)
{
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	    << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);

	if (!sc->sc_fw_chunk_done) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}

static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
    const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
		 * sections from CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
		 * non-paged sections from CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Break since data is not valid or empty section, sec = %d\n",
			    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
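
/*
 * Load-status illustration: sec_num starts at 0x1 and becomes
 * (sec_num << 1) | 0x1 after each section, so after three CPU1
 * sections the low half of IWM_FH_UCODE_LOAD_STATUS reads 0x7
 * (CPU2 uses the upper half via shift_param = 16); the final
 * 0xFFFF/0xFFFFFFFF write tells the uCode that this CPU is done.
 */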

static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
    const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
{
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	if (cpu == 1) {
		*first_ucode_section = 0;
	} else {
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
		 * sections from CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
		 * non-paged sections from CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Break since data is not valid or empty section, sec = %d\n",
			    i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
	    image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc,
			    IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			    IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
			iwm_nic_unlock(sc);
		}

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
		    &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}

int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
    const struct iwm_fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
	    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
		    IWM_RELEASE_CPU_RESET_BIT);
		iwm_nic_unlock(sc);
	}

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}

/* XXX Get rid of this definition */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

/* XXX Add proper rfkill support code */
static int
iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all interrupts besides the
	 * FH_TX interrupt, which is needed to load the firmware).  If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}

static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}
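
/*
 * For illustration (assuming the usual antenna bit layout, bit 0 =
 * antenna A, bit 1 = antenna B): a device with antennas A and B
 * passes valid_tx_ant = 0x3, which the firmware reads back from the
 * htole32()-encoded .valid field.
 */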

/* iwlwifi: mvm/fw.c */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_alive_data *alive_data = data;
	struct iwm_alive_resp_v3 *palive3;
	struct iwm_alive_resp *palive;
	struct iwm_umac_alive *umac;
	struct iwm_lmac_alive *lmac1;
	struct iwm_lmac_alive *lmac2 = NULL;
	uint16_t status;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16toh(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16toh(palive3->status);
	}

	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
	if (lmac2)
		sc->error_event_table[1] =
		    le32toh(lmac2->error_event_table_ptr);
	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
	sc->umac_error_event_table = le32toh(umac->error_info_addr);
	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
	if (sc->umac_error_event_table)
		sc->support_umac_log = TRUE;

	IWM_DPRINTF(sc, IWM_DEBUG_FW,
	    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
	    status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");

	IWM_DPRINTF(sc, IWM_DEBUG_FW,
	    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
	    le32toh(umac->umac_major),
	    le32toh(umac->umac_minor));

	return TRUE;
}

static int
iwm_wait_phy_db_entry(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_phy_db *phy_db = data;

	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
			    __func__, pkt->hdr.code);
		}
		return TRUE;
	}

	if (iwm_phy_db_set_section(phy_db, pkt)) {
		device_printf(sc->sc_dev,
		    "%s: iwm_phy_db_set_section failed\n", __func__);
	}

	return FALSE;
}

static int
iwm_load_ucode_wait_alive(struct iwm_softc *sc,
    enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_alive_data alive_data;
	const struct iwm_fw_img *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_ALIVE };

	fw = &sc->sc_fw.img[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
	    alive_cmd, nitems(alive_cmd),
	    iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
	    IWM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism.
	 * The driver configures the paging flow only once; the CPU2
	 * paging image is included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}

/*
 * mvm misc bits
 */

/*
 * follows iwlwifi/fw.c
 */
static int
iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	iwm_init_notification_wait(sc->sc_notif_wait,
	    &calib_wait,
	    init_complete,
	    nitems(init_complete),
	    iwm_wait_phy_db_entry,
	    sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		ret = iwm_send_bt_init_conf(sc);
		if (ret) {
			device_printf(sc->sc_dev,
			    "failed to send bt coex configuration: %d\n", ret);
			goto error;
		}
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		goto error;
	}

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send the phy configurations command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);

	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}

static int
iwm_config_ltr(struct iwm_softc *sc)
{
	struct iwm_ltr_config_cmd cmd = {
		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!sc->sc_ltr_enabled)
		return 0;

	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
}

/*
 * receive side
 */

/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	if (sc->cfg->mqrx_supported)
		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
	else
		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
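
/*
 * Descriptor-format illustration: RX buffers are 256-byte aligned
 * (see the KASSERT above), so the legacy ring stores the DMA address
 * right-shifted by 8, e.g. a buffer at physical address 0x12345600 is
 * written as the 32-bit value 0x00123456, while MQ-capable devices
 * store the full 64-bit address.
 */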

static void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwm_get_noise(struct iwm_softc *sc,
    const struct iwm_statistics_rx_non_phy *stats)
{
	int i, noise;
#ifdef IWM_DEBUG
	int nbant, total;
#else
	int nbant __unused, total __unused;
#endif

	total = nbant = noise = 0;
	for (i = 0; i < 3; i++) {
		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
		    __func__,
		    i,
		    noise);

		if (noise) {
			total += noise;
			nbant++;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
	    __func__, nbant, total);
#if 0
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
#else
	/* For now, just hard-code it to -96 to be safe */
	return (-96);
#endif
}
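
/*
 * Averaging illustration for the disabled #if 0 path (hypothetical
 * values): beacon_silence_rssi readings of 20, 22 and 0 give
 * nbant = 2 and total = 42, i.e. (42 / 2) - 107 = -86 dBm; with all
 * antennas reading 0 the fallback would be -127 dBm.
 */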

static void
iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_notif_statistics *stats = (void *)&pkt->data;

	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
}

/* iwlwifi: mvm/rx.c */
/*
 * iwm_rx_get_signal_strength - use the new rx PHY INFO API.
 * Values are reported by the fw as positive values - we need to negate
 * them to obtain their dBm.  Account for missing antennas by replacing 0
 * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
 */
static int
iwm_rx_get_signal_strength(struct iwm_softc *sc,
    struct iwm_rx_phy_info *phy_info)
{
	int energy_a, energy_b, energy_c, max_energy;
	uint32_t val;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_A_POS;
	energy_a = energy_a ? -energy_a : -256;
	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_B_POS;
	energy_b = energy_b ? -energy_b : -256;
	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_C_POS;
	energy_c = energy_c ? -energy_c : -256;
	max_energy = MAX(energy_a, energy_b);
	max_energy = MAX(max_energy, energy_c);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "energy in A %d B %d C %d, and max %d\n",
	    energy_a, energy_b, energy_c, max_energy);

	return max_energy;
}
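
/*
 * For illustration: firmware energy readings of A = 40, B = 0 and
 * C = 35 become -40, -256 and -35 dBm respectively, so max_energy
 * (the strongest receiver) is -35 dBm; a zero reading marks a missing
 * antenna rather than a real measurement.
 */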

static int
iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
    struct iwm_rx_mpdu_desc *desc)
{
	int energy_a, energy_b;

	energy_a = desc->v1.energy_a;
	energy_b = desc->v1.energy_b;
	energy_a = energy_a ? -energy_a : -256;
	energy_b = energy_b ? -energy_b : -256;
	return MAX(energy_a, energy_b);
}

/*
 * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw
 */
static bool
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
    bool stolen)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_rx_stats rxs;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	len = le16toh(rx_res->byte_count);
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		device_printf(sc->sc_dev,
		    "dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt);
		return false;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		return false;
	}

	rssi = iwm_rx_get_signal_strength(sc, phy_info);

	/* Map it to relative value */
	rssi = rssi - sc->sc_noise;

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
		    __func__);
		return false;
	}

	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: phy_info: channel=%d, flags=0x%08x\n",
	    __func__,
	    le16toh(phy_info->channel),
	    le16toh(phy_info->phy_flags));

	/*
	 * Populate an RX state struct with the provided information.
	 */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
	rxs.r_flags |= IEEE80211_R_BAND;
	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.c_ieee = le16toh(phy_info->channel);
	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
		rxs.c_band = IEEE80211_CHAN_2GHZ;
	} else {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
		rxs.c_band = IEEE80211_CHAN_5GHZ;
	}

	/* rssi is in 1/2db units */
	rxs.c_rssi = rssi * 2;
	rxs.c_nf = sc->sc_noise;
	if (ieee80211_add_rx_params(m, &rxs) == 0)
		return false;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq = htole16(rxs.c_freq);
		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		switch (phy_info->rate) {
		/* CCK rates. */
		case 10: tap->wr_rate = 2; break;
		case 20: tap->wr_rate = 4; break;
		case 55: tap->wr_rate = 11; break;
		case 110: tap->wr_rate = 22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate = 12; break;
		case 0xf: tap->wr_rate = 18; break;
		case 0x5: tap->wr_rate = 24; break;
		case 0x7: tap->wr_rate = 36; break;
		case 0x9: tap->wr_rate = 48; break;
		case 0xb: tap->wr_rate = 72; break;
		case 0x1: tap->wr_rate = 96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default: tap->wr_rate = 0;
		}
	}

	return true;
}
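
/*
 * Rate-mapping illustration: wr_rate is in net80211's 500 kb/s units,
 * so the CCK 5.5 Mb/s rate (firmware value 55) maps to 11 and the
 * OFDM 54 Mb/s rate (firmware PLCP value 0x3) maps to 108 in the
 * switch above.
 */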
3292
3293 static bool
iwm_rx_mpdu_mq(struct iwm_softc * sc,struct mbuf * m,uint32_t offset,bool stolen)3294 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3295 bool stolen)
3296 {
3297 struct ieee80211com *ic = &sc->sc_ic;
3298 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3299 struct ieee80211_frame *wh;
3300 struct ieee80211_rx_stats rxs;
3301 struct iwm_rx_mpdu_desc *desc;
3302 struct iwm_rx_packet *pkt;
3303 int rssi;
3304 uint32_t hdrlen, len, rate_n_flags;
3305 uint16_t phy_info;
3306 uint8_t channel;
3307
3308 pkt = mtodo(m, offset);
3309 desc = (void *)pkt->data;
3310
3311 if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3312 !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3313 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3314 "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3315 return false;
3316 }
3317
3318 channel = desc->v1.channel;
3319 len = le16toh(desc->mpdu_len);
3320 phy_info = le16toh(desc->phy_info);
3321 rate_n_flags = desc->v1.rate_n_flags;
3322
3323 wh = mtodo(m, sizeof(*desc));
3324 m->m_data = pkt->data + sizeof(*desc);
3325 m->m_pkthdr.len = m->m_len = len;
3326 m->m_len = len;
3327
3328 /* Account for padding following the frame header. */
3329 if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3330 hdrlen = ieee80211_anyhdrsize(wh);
3331 memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3332 m->m_data = mtodo(m, 2);
3333 wh = mtod(m, struct ieee80211_frame *);
3334 }
3335
3336 /* Map it to relative value */
3337 rssi = iwm_rxmq_get_signal_strength(sc, desc);
3338 rssi = rssi - sc->sc_noise;
3339
3340 /* replenish ring for the buffer we're going to feed to the sharks */
3341 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3342 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3343 __func__);
3344 return false;
3345 }
3346
3347 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3348 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3349
3350 /*
3351 * Populate an RX state struct with the provided information.
3352 */
3353 bzero(&rxs, sizeof(rxs));
3354 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3355 rxs.r_flags |= IEEE80211_R_BAND;
3356 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3357 rxs.c_ieee = channel;
3358 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3359 channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3360 rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3361
3362 /* rssi is in 1/2db units */
3363 rxs.c_rssi = rssi * 2;
3364 rxs.c_nf = sc->sc_noise;
3365 if (ieee80211_add_rx_params(m, &rxs) == 0)
3366 return false;
3367
3368 if (ieee80211_radiotap_active_vap(vap)) {
3369 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3370
3371 tap->wr_flags = 0;
3372 if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3373 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3374 tap->wr_chan_freq = htole16(rxs.c_freq);
3375 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3376 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3377 tap->wr_dbm_antsignal = (int8_t)rssi;
3378 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3379 tap->wr_tsft = desc->v1.gp2_on_air_rise;
3380 switch ((rate_n_flags & 0xff)) {
3381 /* CCK rates. */
3382 case 10: tap->wr_rate = 2; break;
3383 case 20: tap->wr_rate = 4; break;
3384 case 55: tap->wr_rate = 11; break;
3385 case 110: tap->wr_rate = 22; break;
3386 /* OFDM rates. */
3387 case 0xd: tap->wr_rate = 12; break;
3388 case 0xf: tap->wr_rate = 18; break;
3389 case 0x5: tap->wr_rate = 24; break;
3390 case 0x7: tap->wr_rate = 36; break;
3391 case 0x9: tap->wr_rate = 48; break;
3392 case 0xb: tap->wr_rate = 72; break;
3393 case 0x1: tap->wr_rate = 96; break;
3394 case 0x3: tap->wr_rate = 108; break;
3395 /* Unknown rate: should not happen. */
3396 default: tap->wr_rate = 0;
3397 }
3398 }
3399
3400 return true;
3401 }
3402
3403 static bool
iwm_rx_mpdu(struct iwm_softc * sc,struct mbuf * m,uint32_t offset,bool stolen)3404 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3405 bool stolen)
3406 {
3407 struct ieee80211com *ic;
3408 struct ieee80211_frame *wh;
3409 struct ieee80211_node *ni;
3410 bool ret;
3411
3412 ic = &sc->sc_ic;
3413
3414 ret = sc->cfg->mqrx_supported ?
3415 iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3416 iwm_rx_rx_mpdu(sc, m, offset, stolen);
3417 if (!ret) {
3418 counter_u64_add(ic->ic_ierrors, 1);
3419 return (ret);
3420 }
3421
3422 wh = mtod(m, struct ieee80211_frame *);
3423 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3424
3425 IWM_UNLOCK(sc);
3426 if (ni != NULL) {
3427 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3428 ieee80211_input_mimo(ni, m);
3429 ieee80211_free_node(ni);
3430 } else {
3431 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3432 ieee80211_input_mimo_all(ic, m);
3433 }
3434 IWM_LOCK(sc);
3435
3436 return true;
3437 }
3438
3439 static int
iwm_rx_tx_cmd_single(struct iwm_softc * sc,struct iwm_rx_packet * pkt,struct iwm_node * in)3440 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3441 struct iwm_node *in)
3442 {
3443 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3444 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3445 struct ieee80211_node *ni = &in->in_ni;
3446 struct ieee80211vap *vap = ni->ni_vap;
3447 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3448 int new_rate, cur_rate;
3449 boolean_t rate_matched;
3450 uint8_t tx_resp_rate;
3451
3452 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3453
3454 /* Update rate control statistics. */
3455 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3456 __func__,
3457 (int) le16toh(tx_resp->status.status),
3458 (int) le16toh(tx_resp->status.sequence),
3459 tx_resp->frame_count,
3460 tx_resp->bt_kill_count,
3461 tx_resp->failure_rts,
3462 tx_resp->failure_frame,
3463 le32toh(tx_resp->initial_rate),
3464 (int) le16toh(tx_resp->wireless_media_time));
3465
3466 cur_rate = ieee80211_node_get_txrate_dot11rate(vap->iv_bss);
3467 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3468
3469 /* For rate control, ignore frames sent at different initial rate */
3470 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3471
3472 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3473 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3474 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3475 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3476 }
3477
3478 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3479 IEEE80211_RATECTL_STATUS_LONG_RETRY;
3480 txs->short_retries = tx_resp->failure_rts;
3481 txs->long_retries = tx_resp->failure_frame;
3482 if (status != IWM_TX_STATUS_SUCCESS &&
3483 status != IWM_TX_STATUS_DIRECT_DONE) {
3484 switch (status) {
3485 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3486 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3487 break;
3488 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3489 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3490 break;
3491 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3492 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3493 break;
3494 default:
3495 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3496 break;
3497 }
3498 } else {
3499 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3500 }
3501
3502 if (rate_matched) {
3503 ieee80211_ratectl_tx_complete(ni, txs);
3504
3505 ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3506 new_rate = ieee80211_node_get_txrate_dot11rate(vap->iv_bss);
3507 if (new_rate != 0 && new_rate != cur_rate) {
3508 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3509 iwm_setrates(sc, in, new_rate);
3510 iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3511 }
3512 }
3513
3514 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3515 }
3516
3517 static void
3518 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3519 {
3520 struct iwm_cmd_header *cmd_hdr;
3521 struct iwm_tx_ring *ring;
3522 struct iwm_tx_data *txd;
3523 struct iwm_node *in;
3524 struct mbuf *m;
3525 int idx, qid, qmsk, status;
3526
3527 cmd_hdr = &pkt->hdr;
3528 idx = cmd_hdr->idx;
3529 qid = cmd_hdr->qid;
3530
3531 ring = &sc->txq[qid];
3532 txd = &ring->data[idx];
3533 in = txd->in;
3534 m = txd->m;
3535
3536 KASSERT(txd->done == 0, ("txd not done"));
3537 KASSERT(txd->in != NULL, ("txd without node"));
3538 KASSERT(txd->m != NULL, ("txd without mbuf"));
3539
3540 sc->sc_tx_timer = 0;
3541
3542 status = iwm_rx_tx_cmd_single(sc, pkt, in);
3543
3544 /* Unmap and free mbuf. */
3545 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3546 bus_dmamap_unload(ring->data_dmat, txd->map);
3547
3548 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3549 "free txd %p, in %p\n", txd, txd->in);
3550 txd->done = 1;
3551 txd->m = NULL;
3552 txd->in = NULL;
3553
3554 ieee80211_tx_complete(&in->in_ni, m, status);
3555
3556 qmsk = 1 << qid;
3557 if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3558 sc->qfullmsk &= ~qmsk;
3559 if (sc->qfullmsk == 0)
3560 iwm_start(sc);
3561 }
3562 }
3563
3564 /*
3565 * transmit side
3566 */
3567
3568 /*
3569 * Process a "command done" firmware notification. This is where we wake up
3570 * processes waiting for a synchronous command completion.
3571 * from if_iwn
3572 */
3573 static void
3574 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3575 {
3576 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3577 struct iwm_tx_data *data;
3578
3579 if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3580 return; /* Not a command ack. */
3581 }
3582
3583 /* XXX wide commands? */
3584 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3585 "cmd notification type 0x%x qid %d idx %d\n",
3586 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3587
3588 data = &ring->data[pkt->hdr.idx];
3589
3590 /* If the command was mapped in an mbuf, free it. */
3591 if (data->m != NULL) {
3592 bus_dmamap_sync(ring->data_dmat, data->map,
3593 BUS_DMASYNC_POSTWRITE);
3594 bus_dmamap_unload(ring->data_dmat, data->map);
3595 m_freem(data->m);
3596 data->m = NULL;
3597 }
3598 wakeup(&ring->desc[pkt->hdr.idx]);
3599
3600 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3601 device_printf(sc->sc_dev,
3602 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3603 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3604 /* XXX call iwm_force_nmi() */
3605 }
3606
3607 KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3608 ring->queued--;
3609 if (ring->queued == 0)
3610 iwm_pcie_clear_cmd_in_flight(sc);
3611 }
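/*
 * For reference, a minimal sketch of the waiting side of the wakeup()
 * above; the real logic lives in iwm_send_cmd(), and the exact flags
 * there may differ:
 *
 *	error = msleep(&ring->desc[ring->cur], &sc->sc_mtx, 0,
 *	    "iwmcmd", hz);
 *	if (error == EWOULDBLOCK)
 *		... command timed out ...
 */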
3612
3613 #if 0
3614 /*
3615 * necessary only for block ack mode
3616 */
3617 void
3618 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3619 uint16_t len)
3620 {
3621 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3622 uint16_t w_val;
3623
3624 scd_bc_tbl = sc->sched_dma.vaddr;
3625
3626 len += 8; /* magic numbers came naturally from paris */
3627 len = roundup(len, 4) / 4;
3628
3629 w_val = htole16(sta_id << 12 | len);
3630
3631 /* Update TX scheduler. */
3632 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3633 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3634 BUS_DMASYNC_PREWRITE);
3635
3636 /* I really wonder what this is ?!? */
3637 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3638 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3639 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3640 BUS_DMASYNC_PREWRITE);
3641 }
3642 }
3643 #endif
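/*
 * Worked example for the byte-count encoding above (hypothetical
 * values): a 1192-byte frame gives len = 1192 + 8 = 1200, which rounds
 * to 1200 / 4 = 300 dwords; with sta_id 0 the scheduler entry is then
 * htole16(0 << 12 | 300).
 */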
3644
3645 static int
3646 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3647 {
3648 int i;
3649
3650 for (i = 0; i < nitems(iwm_rates); i++) {
3651 if (iwm_rates[i].rate == rate)
3652 return (i);
3653 }
3654 /* XXX error? */
3655 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3656 "%s: couldn't find an entry for rate=%d\n",
3657 __func__,
3658 rate);
3659 return (0);
3660 }
3661
3662 /*
3663 * Fill in the rate related information for a transmit command.
3664 */
3665 static const struct iwm_rate *
3666 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3667 struct mbuf *m, struct iwm_tx_cmd *tx)
3668 {
3669 struct ieee80211_node *ni = &in->in_ni;
3670 struct ieee80211_frame *wh;
3671 const struct ieee80211_txparam *tp = ni->ni_txparms;
3672 const struct iwm_rate *rinfo;
3673 int type;
3674 int ridx, rate_flags;
3675
3676 wh = mtod(m, struct ieee80211_frame *);
3677 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3678
3679 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3680 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3681
3682 if (type == IEEE80211_FC0_TYPE_MGT ||
3683 type == IEEE80211_FC0_TYPE_CTL ||
3684 (m->m_flags & M_EAPOL) != 0) {
3685 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3686 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3687 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3688 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3689 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3690 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3691 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3692 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3693 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3694 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3695 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3696 } else {
3697 /* for data frames, use RS table */
3698 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3699 ridx = iwm_rate2ridx(sc,
3700 ieee80211_node_get_txrate_dot11rate(ni));
3701 if (ridx == -1)
3702 ridx = 0;
3703
3704 /* This is the index into the programmed table */
3705 tx->initial_rate_index = 0;
3706 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3707 }
3708
3709 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3710 "%s: frame type=%d txrate %d\n",
3711 __func__, type, iwm_rates[ridx].rate);
3712
3713 rinfo = &iwm_rates[ridx];
3714
3715 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3716 __func__, ridx,
3717 rinfo->rate,
3718 !! (IWM_RIDX_IS_CCK(ridx))
3719 );
3720
3721 /* XXX TODO: hard-coded TX antenna? */
3722 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3723 rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3724 else
3725 rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3726 if (IWM_RIDX_IS_CCK(ridx))
3727 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3728 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3729
3730 return rinfo;
3731 }
3732
3733 #define TB0_SIZE 16
3734 static int
3735 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3736 {
3737 struct ieee80211com *ic = &sc->sc_ic;
3738 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3739 struct iwm_node *in = IWM_NODE(ni);
3740 struct iwm_tx_ring *ring;
3741 struct iwm_tx_data *data;
3742 struct iwm_tfd *desc;
3743 struct iwm_device_cmd *cmd;
3744 struct iwm_tx_cmd *tx;
3745 struct ieee80211_frame *wh;
3746 struct ieee80211_key *k = NULL;
3747 struct mbuf *m1;
3748 const struct iwm_rate *rinfo;
3749 uint32_t flags;
3750 u_int hdrlen;
3751 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3752 int nsegs;
3753 uint8_t tid, type;
3754 int i, totlen, error, pad;
3755
3756 wh = mtod(m, struct ieee80211_frame *);
3757 hdrlen = ieee80211_anyhdrsize(wh);
3758 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3759 tid = 0;
3760 ring = &sc->txq[ac];
3761 desc = &ring->desc[ring->cur];
3762 data = &ring->data[ring->cur];
3763
3764 /* Fill out iwm_tx_cmd to send to the firmware */
3765 cmd = &ring->cmd[ring->cur];
3766 cmd->hdr.code = IWM_TX_CMD;
3767 cmd->hdr.flags = 0;
3768 cmd->hdr.qid = ring->qid;
3769 cmd->hdr.idx = ring->cur;
3770
3771 tx = (void *)cmd->data;
3772 memset(tx, 0, sizeof(*tx));
3773
3774 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3775
3776 /* Encrypt the frame if need be. */
3777 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3778 /* Retrieve key for TX && do software encryption. */
3779 k = ieee80211_crypto_encap(ni, m);
3780 if (k == NULL) {
3781 m_freem(m);
3782 return (ENOBUFS);
3783 }
3784 /* 802.11 header may have moved. */
3785 wh = mtod(m, struct ieee80211_frame *);
3786 }
3787
3788 if (ieee80211_radiotap_active_vap(vap)) {
3789 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3790
3791 tap->wt_flags = 0;
3792 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3793 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3794 tap->wt_rate = rinfo->rate;
3795 if (k != NULL)
3796 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3797 ieee80211_radiotap_tx(vap, m);
3798 }
3799
3800 flags = 0;
3801 totlen = m->m_pkthdr.len;
3802 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3803 flags |= IWM_TX_CMD_FLG_ACK;
3804 }
3805
3806 if (type == IEEE80211_FC0_TYPE_DATA &&
3807 totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3808 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3809 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3810 }
3811
3812 tx->sta_id = IWM_STATION_ID;
3813
3814 if (type == IEEE80211_FC0_TYPE_MGT) {
3815 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3816
3817 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3818 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3819 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3820 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3821 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3822 } else {
3823 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3824 }
3825 } else {
3826 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3827 }
3828
3829 if (hdrlen & 3) {
3830 /* First segment length must be a multiple of 4. */
3831 flags |= IWM_TX_CMD_FLG_MH_PAD;
3832 tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3833 pad = 4 - (hdrlen & 3);
3834 } else {
3835 tx->offload_assist = 0;
3836 pad = 0;
3837 }
3838
3839 tx->len = htole16(totlen);
3840 tx->tid_tspec = tid;
3841 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3842
3843 /* Set physical address of "scratch area". */
3844 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3845 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3846
3847 /* Copy 802.11 header in TX command. */
3848 memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3849
3850 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3851
3852 tx->sec_ctl = 0;
3853 tx->tx_flags |= htole32(flags);
3854
3855 /* Trim 802.11 header. */
3856 m_adj(m, hdrlen);
3857 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3858 segs, &nsegs, BUS_DMA_NOWAIT);
3859 if (error != 0) {
3860 if (error != EFBIG) {
3861 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3862 error);
3863 m_freem(m);
3864 return error;
3865 }
3866 /* Too many DMA segments, linearize mbuf. */
3867 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3868 if (m1 == NULL) {
3869 device_printf(sc->sc_dev,
3870 "%s: could not defrag mbuf\n", __func__);
3871 m_freem(m);
3872 return (ENOBUFS);
3873 }
3874 m = m1;
3875
3876 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3877 segs, &nsegs, BUS_DMA_NOWAIT);
3878 if (error != 0) {
3879 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3880 error);
3881 m_freem(m);
3882 return error;
3883 }
3884 }
3885 data->m = m;
3886 data->in = in;
3887 data->done = 0;
3888
3889 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3890 "sending txd %p, in %p\n", data, data->in);
3891 KASSERT(data->in != NULL, ("node is NULL"));
3892
3893 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3894 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3895 ring->qid, ring->cur, totlen, nsegs,
3896 le32toh(tx->tx_flags),
3897 le32toh(tx->rate_n_flags),
3898 tx->initial_rate_index
3899 );
3900
3901 /* Fill TX descriptor. */
3902 memset(desc, 0, sizeof(*desc));
3903 desc->num_tbs = 2 + nsegs;
3904
3905 desc->tbs[0].lo = htole32(data->cmd_paddr);
3906 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3907 (TB0_SIZE << 4));
3908 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3909 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3910 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3911 hdrlen + pad - TB0_SIZE) << 4));
3912
3913 /* Other DMA segments are for data payload. */
3914 for (i = 0; i < nsegs; i++) {
3915 seg = &segs[i];
3916 desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3917 desc->tbs[i + 2].hi_n_len =
3918 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3919 (seg->ds_len << 4);
3920 }
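	/*
	 * hi_n_len packs the upper 4 bits of the 36-bit DMA address into
	 * the low nibble and the buffer length into bits 4-15. E.g.
	 * (hypothetical numbers) a 512-byte segment at a 32-bit address
	 * yields hi_n_len = 512 << 4 = 0x2000.
	 */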
3921
3922 bus_dmamap_sync(ring->data_dmat, data->map,
3923 BUS_DMASYNC_PREWRITE);
3924 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3925 BUS_DMASYNC_PREWRITE);
3926 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3927 BUS_DMASYNC_PREWRITE);
3928
3929 #if 0
3930 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3931 #endif
3932
3933 /* Kick TX ring. */
3934 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3935 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3936
3937 /* Mark TX ring as full if we reach a certain threshold. */
3938 if (++ring->queued > IWM_TX_RING_HIMARK) {
3939 sc->qfullmsk |= 1 << ring->qid;
3940 }
3941
3942 return 0;
3943 }
3944
3945 static int
3946 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3947 const struct ieee80211_bpf_params *params)
3948 {
3949 struct ieee80211com *ic = ni->ni_ic;
3950 struct iwm_softc *sc = ic->ic_softc;
3951 int error = 0;
3952
3953 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3954 "->%s begin\n", __func__);
3955
3956 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3957 m_freem(m);
3958 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3959 "<-%s not RUNNING\n", __func__);
3960 return (ENETDOWN);
3961 }
3962
3963 IWM_LOCK(sc);
3964 /* XXX fix this */
3965 if (params == NULL) {
3966 error = iwm_tx(sc, m, ni, 0);
3967 } else {
3968 error = iwm_tx(sc, m, ni, 0);
3969 }
3970 if (sc->sc_tx_timer == 0)
3971 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3972 sc->sc_tx_timer = 5;
3973 IWM_UNLOCK(sc);
3974
3975 return (error);
3976 }
3977
3978 /*
3979 * mvm/tx.c
3980 */
3981
3982 /*
3983 * Note that there are transports that buffer frames before they reach
3984 * the firmware. This means that after flush_tx_path is called, the
3985 * queue might not be empty. The race-free way to handle this is to:
3986 * 1) set the station as draining
3987 * 2) flush the Tx path
3988 * 3) wait for the transport queues to be empty
3989 */
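/*
 * A minimal sketch of that sequence (the helper names here are
 * hypothetical; this driver only implements the flush step below):
 *
 *	iwm_sta_set_draining(sc, in, TRUE);
 *	iwm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
 *	iwm_wait_tx_queues_empty(sc, tfd_msk);
 */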
3990 int
3991 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3992 {
3993 int ret;
3994 struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
3995 .queues_ctl = htole32(tfd_msk),
3996 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3997 };
3998
3999 ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4000 sizeof(flush_cmd), &flush_cmd);
4001 if (ret)
4002 device_printf(sc->sc_dev,
4003 "Flushing tx queue failed: %d\n", ret);
4004 return ret;
4005 }
4006
4007 /*
4008 * BEGIN mvm/quota.c
4009 */
4010
4011 static int
4012 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4013 {
4014 struct iwm_time_quota_cmd_v1 cmd;
4015 int i, idx, ret, num_active_macs, quota, quota_rem;
4016 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4017 int n_ifs[IWM_MAX_BINDINGS] = {0, };
4018 uint16_t id;
4019
4020 memset(&cmd, 0, sizeof(cmd));
4021
4022 /* currently, PHY ID == binding ID */
4023 if (ivp) {
4024 id = ivp->phy_ctxt->id;
4025 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4026 colors[id] = ivp->phy_ctxt->color;
4027
4028 if (1)
4029 n_ifs[id] = 1;
4030 }
4031
4032 /*
4033 * The FW's scheduling session consists of
4034 * IWM_MAX_QUOTA fragments. Divide these fragments
4035 * equally between all the bindings that require quota
4036 */
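	/*
	 * E.g. (assuming IWM_MAX_QUOTA is 128): one active MAC gets
	 * quota = 128 and quota_rem = 0; three would get quota = 42 with
	 * quota_rem = 2, the remainder being handed to the first binding
	 * further down.
	 */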
4037 num_active_macs = 0;
4038 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4039 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4040 num_active_macs += n_ifs[i];
4041 }
4042
4043 quota = 0;
4044 quota_rem = 0;
4045 if (num_active_macs) {
4046 quota = IWM_MAX_QUOTA / num_active_macs;
4047 quota_rem = IWM_MAX_QUOTA % num_active_macs;
4048 }
4049
4050 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4051 if (colors[i] < 0)
4052 continue;
4053
4054 cmd.quotas[idx].id_and_color =
4055 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4056
4057 if (n_ifs[i] <= 0) {
4058 cmd.quotas[idx].quota = htole32(0);
4059 cmd.quotas[idx].max_duration = htole32(0);
4060 } else {
4061 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4062 cmd.quotas[idx].max_duration = htole32(0);
4063 }
4064 idx++;
4065 }
4066
4067 /* Give the remainder of the session to the first binding */
4068 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4069
4070 ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4071 sizeof(cmd), &cmd);
4072 if (ret)
4073 device_printf(sc->sc_dev,
4074 "%s: Failed to send quota: %d\n", __func__, ret);
4075 return ret;
4076 }
4077
4078 /*
4079 * END mvm/quota.c
4080 */
4081
4082 /*
4083 * ieee80211 routines
4084 */
4085
4086 /*
4087 * Change to AUTH state in 80211 state machine. Roughly matches what
4088 * Linux does in bss_info_changed().
4089 */
4090 static int
4091 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4092 {
4093 struct ieee80211_node *ni;
4094 struct iwm_node *in;
4095 struct iwm_vap *iv = IWM_VAP(vap);
4096 uint32_t duration;
4097 int error;
4098
4099 /*
4100 * XXX I have a feeling that the vap node is being
4101 * freed from underneath us. Grr.
4102 */
4103 ni = ieee80211_ref_node(vap->iv_bss);
4104 in = IWM_NODE(ni);
4105 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4106 "%s: called; vap=%p, bss ni=%p\n",
4107 __func__,
4108 vap,
4109 ni);
4110 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4111 __func__, ether_sprintf(ni->ni_bssid));
4112
4113 in->in_assoc = 0;
4114 iv->iv_auth = 1;
4115
4116 /*
4117 * Firmware bug - it'll crash if the beacon interval is less
4118 * than 16. We can't avoid connecting at all, so refuse the
4119 * station state change, this will cause net80211 to abandon
4120 * attempts to connect to this AP, and eventually wpa_s will
4121 * blacklist the AP...
4122 */
4123 if (ni->ni_intval < 16) {
4124 device_printf(sc->sc_dev,
4125 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4126 ether_sprintf(ni->ni_bssid), ni->ni_intval);
4127 error = EINVAL;
4128 goto out;
4129 }
4130
4131 error = iwm_allow_mcast(vap, sc);
4132 if (error) {
4133 device_printf(sc->sc_dev,
4134 "%s: failed to set multicast\n", __func__);
4135 goto out;
4136 }
4137
4138 /*
4139 * This is where it deviates from what Linux does.
4140 *
4141 * Linux iwlwifi doesn't reset the nic each time, nor does it
4142 * call ctxt_add() here. Instead, it adds it during vap creation,
4143 * and always does a mac_ctx_changed().
4144 *
4145 * The openbsd port doesn't attempt to do that - it reset things
4146 * at odd states and does the add here.
4147 *
4148 * So, until the state handling is fixed (ie, we never reset
4149 * the NIC except for a firmware failure, which should drag
4150 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4151 * contexts that are required), let's do a dirty hack here.
4152 */
4153 if (iv->is_uploaded) {
4154 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4155 device_printf(sc->sc_dev,
4156 "%s: failed to update MAC\n", __func__);
4157 goto out;
4158 }
4159 } else {
4160 if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4161 device_printf(sc->sc_dev,
4162 "%s: failed to add MAC\n", __func__);
4163 goto out;
4164 }
4165 }
4166 sc->sc_firmware_state = 1;
4167
4168 if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4169 in->in_ni.ni_chan, 1, 1)) != 0) {
4170 device_printf(sc->sc_dev,
4171 "%s: failed update phy ctxt\n", __func__);
4172 goto out;
4173 }
4174 iv->phy_ctxt = &sc->sc_phyctxt[0];
4175
4176 if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4177 device_printf(sc->sc_dev,
4178 "%s: binding update cmd\n", __func__);
4179 goto out;
4180 }
4181 sc->sc_firmware_state = 2;
4182 /*
4183 * Authentication becomes unreliable when powersaving is left enabled
4184 * here. Powersaving will be activated again when association has
4185 * finished or is aborted.
4186 */
4187 iv->ps_disabled = TRUE;
4188 error = iwm_power_update_mac(sc);
4189 iv->ps_disabled = FALSE;
4190 if (error != 0) {
4191 device_printf(sc->sc_dev,
4192 "%s: failed to update power management\n",
4193 __func__);
4194 goto out;
4195 }
4196 if ((error = iwm_add_sta(sc, in)) != 0) {
4197 device_printf(sc->sc_dev,
4198 "%s: failed to add sta\n", __func__);
4199 goto out;
4200 }
4201 sc->sc_firmware_state = 3;
4202
4203 /*
4204 * Prevent the FW from wandering off channel during association
4205 * by "protecting" the session with a time event.
4206 */
4207 /* XXX duration is in units of TU, not MS */
4208 duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4209 iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
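	/* 1 TU is 1024 usec, so a value meant as ms runs ~2.4% longer. */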
4210
4211 error = 0;
4212 out:
4213 if (error != 0)
4214 iv->iv_auth = 0;
4215 ieee80211_free_node(ni);
4216 return (error);
4217 }
4218
4219 static struct ieee80211_node *
4220 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4221 {
4222 return malloc(sizeof (struct iwm_node), M_80211_NODE,
4223 M_NOWAIT | M_ZERO);
4224 }
4225
4226 static uint8_t
4227 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4228 {
4229 uint8_t plcp = rate_n_flags & 0xff;
4230 int i;
4231
4232 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4233 if (iwm_rates[i].plcp == plcp)
4234 return iwm_rates[i].rate;
4235 }
4236 return 0;
4237 }
4238
4239 uint8_t
4240 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4241 {
4242 int i;
4243 uint8_t rval;
4244
4245 for (i = 0; i < rs->rs_nrates; i++) {
4246 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4247 if (rval == iwm_rates[ridx].rate)
4248 return rs->rs_rates[i];
4249 }
4250
4251 return 0;
4252 }
4253
4254 static int
4255 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4256 {
4257 int i;
4258
4259 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4260 if (iwm_rates[i].rate == rate)
4261 return i;
4262 }
4263
4264 device_printf(sc->sc_dev,
4265 "%s: WARNING: device rate for %u not found!\n",
4266 __func__, rate);
4267
4268 return -1;
4269 }
4270
4271
4272 static void
4273 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int dot11rate)
4274 {
4275 struct ieee80211_node *ni = &in->in_ni;
4276 struct iwm_lq_cmd *lq = &in->in_lq;
4277 struct ieee80211_rateset *rs = &ni->ni_rates;
4278 int nrates = rs->rs_nrates;
4279 int i, ridx, tab = 0;
4280 // int txant = 0;
4281 int rix;
4282
4283 /*
4284 * Look up the rate index for the given legacy rate from
4285 * the rs_rates table. Default to the lowest rate if it's
4286 * not found (which is obviously hugely problematic.)
4287 */
4288 rix = -1;
4289 for (i = 0; i < nrates; i++) {
4290 int rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
4291 if (rate == dot11rate) {
4292 rix = i;
4293 break;
4294 }
4295 }
4296 if (rix < 0) {
4297 device_printf(sc->sc_dev,
4298 "%s: failed to lookup dot11rate (%d)\n",
4299 __func__, dot11rate);
4300 rix = 0;
4301 }
4302
4303 if (nrates > nitems(lq->rs_table)) {
4304 device_printf(sc->sc_dev,
4305 "%s: node supports %d rates, driver handles "
4306 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4307 return;
4308 }
4309 if (nrates == 0) {
4310 device_printf(sc->sc_dev,
4311 "%s: node supports 0 rates, odd!\n", __func__);
4312 return;
4313 }
4314 nrates = imin(rix + 1, nrates);
4315
4316 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4317 "%s: nrates=%d\n", __func__, nrates);
4318
4319 /* then construct a lq_cmd based on those */
4320 memset(lq, 0, sizeof(*lq));
4321 lq->sta_id = IWM_STATION_ID;
4322
4323 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4324 if (ni->ni_flags & IEEE80211_NODE_HT)
4325 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4326
4327 /*
4328 * Are these used? (We don't do SISO or MIMO.)
4329 * They need to be set to non-zero, though, or we get an error.
4330 */
4331 lq->single_stream_ant_msk = 1;
4332 lq->dual_stream_ant_msk = 1;
4333
4334 /*
4335 * Build the actual rate selection table.
4336 * The lowest bits are the rates. Additionally,
4337 * CCK needs bit 9 to be set. The rest of the bits
4338 * we add to the table select the tx antenna.
4339 * Note that we add the rates highest-rate first
4340 * (opposite of ni_rates).
4341 */
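	/*
	 * E.g. for 11 Mb/s CCK (hypothetical values, assuming a PLCP of
	 * 110 and IWM_RATE_MCS_ANT_POS of 14, with antenna A valid):
	 * tab = 110 | (1 << 14) | (1 << 9).
	 */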
4342 for (i = 0; i < nrates; i++) {
4343 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4344 int nextant;
4345
4346 /* Map 802.11 rate to HW rate index. */
4347 ridx = iwm_rate2ridx(sc, rate);
4348 if (ridx == -1)
4349 continue;
4350
4351 #if 0
4352 if (txant == 0)
4353 txant = iwm_get_valid_tx_ant(sc);
4354 nextant = 1<<(ffs(txant)-1);
4355 txant &= ~nextant;
4356 #else
4357 nextant = iwm_get_valid_tx_ant(sc);
4358 #endif
4359 tab = iwm_rates[ridx].plcp;
4360 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4361 if (IWM_RIDX_IS_CCK(ridx))
4362 tab |= IWM_RATE_MCS_CCK_MSK;
4363 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4364 "station rate i=%d, rate=%d, hw=%x\n",
4365 i, iwm_rates[ridx].rate, tab);
4366 lq->rs_table[i] = htole32(tab);
4367 }
4368 /* then fill the rest with the lowest possible rate */
4369 for (i = nrates; i < nitems(lq->rs_table); i++) {
4370 KASSERT(tab != 0, ("invalid tab"));
4371 lq->rs_table[i] = htole32(tab);
4372 }
4373 }
4374
4375 static void
4376 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4377 {
4378 struct iwm_vap *ivp = IWM_VAP(vap);
4379 int error;
4380
4381 /* Avoid Tx watchdog triggering, when transfers get dropped here. */
4382 sc->sc_tx_timer = 0;
4383
4384 ivp->iv_auth = 0;
4385 if (sc->sc_firmware_state == 3) {
4386 iwm_xmit_queue_drain(sc);
4387 // iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4388 error = iwm_rm_sta(sc, vap, TRUE);
4389 if (error) {
4390 device_printf(sc->sc_dev,
4391 "%s: Failed to remove station: %d\n",
4392 __func__, error);
4393 }
4394 }
4395 if (sc->sc_firmware_state == 3) {
4396 error = iwm_mac_ctxt_changed(sc, vap);
4397 if (error) {
4398 device_printf(sc->sc_dev,
4399 "%s: Failed to change mac context: %d\n",
4400 __func__, error);
4401 }
4402 }
4403 if (sc->sc_firmware_state == 3) {
4404 error = iwm_sf_update(sc, vap, FALSE);
4405 if (error) {
4406 device_printf(sc->sc_dev,
4407 "%s: Failed to update smart FIFO: %d\n",
4408 __func__, error);
4409 }
4410 }
4411 if (sc->sc_firmware_state == 3) {
4412 error = iwm_rm_sta_id(sc, vap);
4413 if (error) {
4414 device_printf(sc->sc_dev,
4415 "%s: Failed to remove station id: %d\n",
4416 __func__, error);
4417 }
4418 }
4419 if (sc->sc_firmware_state == 3) {
4420 error = iwm_update_quotas(sc, NULL);
4421 if (error) {
4422 device_printf(sc->sc_dev,
4423 "%s: Failed to update PHY quota: %d\n",
4424 __func__, error);
4425 }
4426 }
4427 if (sc->sc_firmware_state == 3) {
4428 /* XXX Might need to specify bssid correctly. */
4429 error = iwm_mac_ctxt_changed(sc, vap);
4430 if (error) {
4431 device_printf(sc->sc_dev,
4432 "%s: Failed to change mac context: %d\n",
4433 __func__, error);
4434 }
4435 }
4436 if (sc->sc_firmware_state == 3) {
4437 sc->sc_firmware_state = 2;
4438 }
4439 if (sc->sc_firmware_state > 1) {
4440 error = iwm_binding_remove_vif(sc, ivp);
4441 if (error) {
4442 device_printf(sc->sc_dev,
4443 "%s: Failed to remove channel ctx: %d\n",
4444 __func__, error);
4445 }
4446 }
4447 if (sc->sc_firmware_state > 1) {
4448 sc->sc_firmware_state = 1;
4449 }
4450 ivp->phy_ctxt = NULL;
4451 if (sc->sc_firmware_state > 0) {
4452 error = iwm_mac_ctxt_changed(sc, vap);
4453 if (error) {
4454 device_printf(sc->sc_dev,
4455 "%s: Failed to change mac context: %d\n",
4456 __func__, error);
4457 }
4458 }
4459 if (sc->sc_firmware_state > 0) {
4460 error = iwm_power_update_mac(sc);
4461 if (error != 0) {
4462 device_printf(sc->sc_dev,
4463 "%s: failed to update power management\n",
4464 __func__);
4465 }
4466 }
4467 sc->sc_firmware_state = 0;
4468 }
4469
4470 static int
4471 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4472 {
4473 struct iwm_vap *ivp = IWM_VAP(vap);
4474 struct ieee80211com *ic = vap->iv_ic;
4475 struct iwm_softc *sc = ic->ic_softc;
4476 struct iwm_node *in;
4477 int error;
4478
4479 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4480 "switching state %s -> %s arg=0x%x\n",
4481 ieee80211_state_name[vap->iv_state],
4482 ieee80211_state_name[nstate],
4483 arg);
4484
4485 IEEE80211_UNLOCK(ic);
4486 IWM_LOCK(sc);
4487
4488 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4489 (nstate == IEEE80211_S_AUTH ||
4490 nstate == IEEE80211_S_ASSOC ||
4491 nstate == IEEE80211_S_RUN)) {
4492 /* Stop blinking for a scan, when authenticating. */
4493 iwm_led_blink_stop(sc);
4494 }
4495
4496 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4497 iwm_led_disable(sc);
4498 /* disable beacon filtering if we're hopping out of RUN */
4499 iwm_disable_beacon_filter(sc);
4500 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4501 in->in_assoc = 0;
4502 }
4503
4504 if ((vap->iv_state == IEEE80211_S_AUTH ||
4505 vap->iv_state == IEEE80211_S_ASSOC ||
4506 vap->iv_state == IEEE80211_S_RUN) &&
4507 (nstate == IEEE80211_S_INIT ||
4508 nstate == IEEE80211_S_SCAN ||
4509 nstate == IEEE80211_S_AUTH)) {
4510 iwm_stop_session_protection(sc, ivp);
4511 }
4512
4513 if ((vap->iv_state == IEEE80211_S_RUN ||
4514 vap->iv_state == IEEE80211_S_ASSOC) &&
4515 nstate == IEEE80211_S_INIT) {
4516 /*
4517 * In this case, iv_newstate() wants to send an 80211 frame on
4518 * the network that we are leaving. So we need to call it,
4519 * before tearing down all the firmware state.
4520 */
4521 IWM_UNLOCK(sc);
4522 IEEE80211_LOCK(ic);
4523 ivp->iv_newstate(vap, nstate, arg);
4524 IEEE80211_UNLOCK(ic);
4525 IWM_LOCK(sc);
4526 iwm_bring_down_firmware(sc, vap);
4527 IWM_UNLOCK(sc);
4528 IEEE80211_LOCK(ic);
4529 return 0;
4530 }
4531
4532 switch (nstate) {
4533 case IEEE80211_S_INIT:
4534 case IEEE80211_S_SCAN:
4535 break;
4536
4537 case IEEE80211_S_AUTH:
4538 iwm_bring_down_firmware(sc, vap);
4539 if ((error = iwm_auth(vap, sc)) != 0) {
4540 device_printf(sc->sc_dev,
4541 "%s: could not move to auth state: %d\n",
4542 __func__, error);
4543 iwm_bring_down_firmware(sc, vap);
4544 IWM_UNLOCK(sc);
4545 IEEE80211_LOCK(ic);
4546 return 1;
4547 }
4548 break;
4549
4550 case IEEE80211_S_ASSOC:
4551 /*
4552 * EBS may be disabled due to previous failures reported by FW.
4553 * Reset EBS status here assuming environment has been changed.
4554 */
4555 sc->last_ebs_successful = TRUE;
4556 break;
4557
4558 case IEEE80211_S_RUN:
4559 in = IWM_NODE(vap->iv_bss);
4560 /* Update the association state, now we have it all */
4561 /* (e.g. associd comes in at this point) */
4562 error = iwm_update_sta(sc, in);
4563 if (error != 0) {
4564 device_printf(sc->sc_dev,
4565 "%s: failed to update STA\n", __func__);
4566 IWM_UNLOCK(sc);
4567 IEEE80211_LOCK(ic);
4568 return error;
4569 }
4570 in->in_assoc = 1;
4571 error = iwm_mac_ctxt_changed(sc, vap);
4572 if (error != 0) {
4573 device_printf(sc->sc_dev,
4574 "%s: failed to update MAC: %d\n", __func__, error);
4575 }
4576
4577 iwm_sf_update(sc, vap, FALSE);
4578 iwm_enable_beacon_filter(sc, ivp);
4579 iwm_power_update_mac(sc);
4580 iwm_update_quotas(sc, ivp);
4581 ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4582 iwm_setrates(sc, in,
4583 ieee80211_node_get_txrate_dot11rate(&in->in_ni));
4584
4585 if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4586 device_printf(sc->sc_dev,
4587 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4588 }
4589
4590 iwm_led_enable(sc);
4591 break;
4592
4593 default:
4594 break;
4595 }
4596 IWM_UNLOCK(sc);
4597 IEEE80211_LOCK(ic);
4598
4599 return (ivp->iv_newstate(vap, nstate, arg));
4600 }
4601
4602 void
4603 iwm_endscan_cb(void *arg, int pending)
4604 {
4605 struct iwm_softc *sc = arg;
4606 struct ieee80211com *ic = &sc->sc_ic;
4607
4608 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4609 "%s: scan ended\n",
4610 __func__);
4611
4612 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4613 }
4614
4615 static int
4616 iwm_send_bt_init_conf(struct iwm_softc *sc)
4617 {
4618 struct iwm_bt_coex_cmd bt_cmd;
4619
4620 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4621 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4622
4623 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4624 &bt_cmd);
4625 }
4626
4627 static boolean_t
4628 iwm_is_lar_supported(struct iwm_softc *sc)
4629 {
4630 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4631 boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4632
4633 if (iwm_lar_disable)
4634 return FALSE;
4635
4636 /*
4637 * Enable LAR only if it is supported by the FW (TLV) &&
4638 * enabled in the NVM
4639 */
4640 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4641 return nvm_lar && tlv_lar;
4642 else
4643 return tlv_lar;
4644 }
4645
4646 static boolean_t
4647 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4648 {
4649 return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4650 iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4651 }
4652
4653 static int
4654 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4655 {
4656 struct iwm_mcc_update_cmd mcc_cmd;
4657 struct iwm_host_cmd hcmd = {
4658 .id = IWM_MCC_UPDATE_CMD,
4659 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4660 .data = { &mcc_cmd },
4661 };
4662 int ret;
4663 #ifdef IWM_DEBUG
4664 struct iwm_rx_packet *pkt;
4665 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4666 struct iwm_mcc_update_resp_v2 *mcc_resp;
4667 int n_channels;
4668 uint16_t mcc;
4669 #endif
4670 int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4671
4672 if (!iwm_is_lar_supported(sc)) {
4673 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4674 __func__);
4675 return 0;
4676 }
4677
4678 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4679 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
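	/* E.g. "ZZ" encodes as 0x5a5a and "US" as 0x5553. */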
4680 if (iwm_is_wifi_mcc_supported(sc))
4681 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4682 else
4683 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4684
4685 if (resp_v2)
4686 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4687 else
4688 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4689
4690 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4691 "send MCC update to FW with '%c%c' src = %d\n",
4692 alpha2[0], alpha2[1], mcc_cmd.source_id);
4693
4694 ret = iwm_send_cmd(sc, &hcmd);
4695 if (ret)
4696 return ret;
4697
4698 #ifdef IWM_DEBUG
4699 pkt = hcmd.resp_pkt;
4700
4701 /* Extract MCC response */
4702 if (resp_v2) {
4703 mcc_resp = (void *)pkt->data;
4704 mcc = mcc_resp->mcc;
4705 n_channels = le32toh(mcc_resp->n_channels);
4706 } else {
4707 mcc_resp_v1 = (void *)pkt->data;
4708 mcc = mcc_resp_v1->mcc;
4709 n_channels = le32toh(mcc_resp_v1->n_channels);
4710 }
4711
4712 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4713 if (mcc == 0)
4714 mcc = 0x3030; /* "00" - world */
4715
4716 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4717 "regulatory domain '%c%c' (%d channels available)\n",
4718 mcc >> 8, mcc & 0xff, n_channels);
4719 #endif
4720 iwm_free_resp(sc, &hcmd);
4721
4722 return 0;
4723 }
4724
4725 static void
4726 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4727 {
4728 struct iwm_host_cmd cmd = {
4729 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4730 .len = { sizeof(uint32_t), },
4731 .data = { &backoff, },
4732 };
4733
4734 if (iwm_send_cmd(sc, &cmd) != 0) {
4735 device_printf(sc->sc_dev,
4736 "failed to change thermal tx backoff\n");
4737 }
4738 }
4739
4740 static int
4741 iwm_init_hw(struct iwm_softc *sc)
4742 {
4743 struct ieee80211com *ic = &sc->sc_ic;
4744 int error, i, ac;
4745
4746 sc->sf_state = IWM_SF_UNINIT;
4747
4748 if ((error = iwm_start_hw(sc)) != 0) {
4749 printf("iwm_start_hw: failed %d\n", error);
4750 return error;
4751 }
4752
4753 if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4754 printf("iwm_run_init_ucode: failed %d\n", error);
4755 return error;
4756 }
4757
4758 /*
4759 * We should stop and restart the HW, since the INIT
4760 * image has just been loaded.
4761 */
4762 iwm_stop_device(sc);
4763 sc->sc_ps_disabled = FALSE;
4764 if ((error = iwm_start_hw(sc)) != 0) {
4765 device_printf(sc->sc_dev, "could not initialize hardware\n");
4766 return error;
4767 }
4768
4769 /* restart, this time with the regular firmware */
4770 error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4771 if (error) {
4772 device_printf(sc->sc_dev, "could not load firmware\n");
4773 goto error;
4774 }
4775
4776 error = iwm_sf_update(sc, NULL, FALSE);
4777 if (error)
4778 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4779
4780 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4781 device_printf(sc->sc_dev, "bt init conf failed\n");
4782 goto error;
4783 }
4784
4785 error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4786 if (error != 0) {
4787 device_printf(sc->sc_dev, "antenna config failed\n");
4788 goto error;
4789 }
4790
4791 /* Send phy db control command and then phy db calibration */
4792 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4793 goto error;
4794
4795 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4796 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4797 goto error;
4798 }
4799
4800 /* Add auxiliary station for scanning */
4801 if ((error = iwm_add_aux_sta(sc)) != 0) {
4802 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4803 goto error;
4804 }
4805
4806 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4807 /*
4808 * The channel used here isn't relevant as it's
4809 * going to be overwritten in the other flows.
4810 * For now use the first channel we have.
4811 */
4812 if ((error = iwm_phy_ctxt_add(sc,
4813 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4814 goto error;
4815 }
4816
4817 /* Initialize tx backoffs to the minimum. */
4818 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4819 iwm_tt_tx_backoff(sc, 0);
4820
4821 if (iwm_config_ltr(sc) != 0)
4822 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4823
4824 error = iwm_power_update_device(sc);
4825 if (error)
4826 goto error;
4827
4828 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4829 goto error;
4830
4831 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4832 if ((error = iwm_config_umac_scan(sc)) != 0)
4833 goto error;
4834 }
4835
4836 /* Enable Tx queues. */
4837 for (ac = 0; ac < WME_NUM_AC; ac++) {
4838 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4839 iwm_ac_to_tx_fifo[ac]);
4840 if (error)
4841 goto error;
4842 }
4843
4844 if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4845 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4846 goto error;
4847 }
4848
4849 return 0;
4850
4851 error:
4852 iwm_stop_device(sc);
4853 return error;
4854 }
4855
4856 /* Allow multicast from our BSSID. */
4857 static int
4858 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4859 {
4860 struct ieee80211_node *ni = vap->iv_bss;
4861 struct iwm_mcast_filter_cmd *cmd;
4862 size_t size;
4863 int error;
4864
4865 size = roundup(sizeof(*cmd), 4);
4866 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4867 if (cmd == NULL)
4868 return ENOMEM;
4869 cmd->filter_own = 1;
4870 cmd->port_id = 0;
4871 cmd->count = 0;
4872 cmd->pass_all = 1;
4873 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4874
4875 error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4876 IWM_CMD_SYNC, size, cmd);
4877 free(cmd, M_DEVBUF);
4878
4879 return (error);
4880 }
4881
4882 /*
4883 * ifnet interfaces
4884 */
4885
4886 static void
4887 iwm_init(struct iwm_softc *sc)
4888 {
4889 int error;
4890
4891 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4892 return;
4893 }
4894 sc->sc_generation++;
4895 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4896
4897 if ((error = iwm_init_hw(sc)) != 0) {
4898 printf("iwm_init_hw failed %d\n", error);
4899 iwm_stop(sc);
4900 return;
4901 }
4902
4903 /*
4904 * Ok, firmware loaded and we are jogging
4905 */
4906 sc->sc_flags |= IWM_FLAG_HW_INITED;
4907 }
4908
4909 static int
4910 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4911 {
4912 struct iwm_softc *sc;
4913 int error;
4914
4915 sc = ic->ic_softc;
4916
4917 IWM_LOCK(sc);
4918 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4919 IWM_UNLOCK(sc);
4920 return (ENXIO);
4921 }
4922 error = mbufq_enqueue(&sc->sc_snd, m);
4923 if (error) {
4924 IWM_UNLOCK(sc);
4925 return (error);
4926 }
4927 iwm_start(sc);
4928 IWM_UNLOCK(sc);
4929 return (0);
4930 }
4931
4932 /*
4933 * Dequeue packets from sendq and call send.
4934 */
4935 static void
4936 iwm_start(struct iwm_softc *sc)
4937 {
4938 struct ieee80211_node *ni;
4939 struct mbuf *m;
4940 int ac = 0;
4941
4942 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4943 while (sc->qfullmsk == 0 &&
4944 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4945 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4946 if (iwm_tx(sc, m, ni, ac) != 0) {
4947 if_inc_counter(ni->ni_vap->iv_ifp,
4948 IFCOUNTER_OERRORS, 1);
4949 ieee80211_free_node(ni);
4950 continue;
4951 }
4952 if (sc->sc_tx_timer == 0) {
4953 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4954 sc);
4955 }
4956 sc->sc_tx_timer = 15;
4957 }
4958 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4959 }
4960
4961 static void
4962 iwm_stop(struct iwm_softc *sc)
4963 {
4964
4965 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4966 sc->sc_flags |= IWM_FLAG_STOPPED;
4967 sc->sc_generation++;
4968 iwm_led_blink_stop(sc);
4969 sc->sc_tx_timer = 0;
4970 iwm_stop_device(sc);
4971 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4972 }
4973
4974 static void
4975 iwm_watchdog(void *arg)
4976 {
4977 struct iwm_softc *sc = arg;
4978 struct ieee80211com *ic = &sc->sc_ic;
4979
4980 if (sc->sc_attached == 0)
4981 return;
4982
4983 if (sc->sc_tx_timer > 0) {
4984 if (--sc->sc_tx_timer == 0) {
4985 device_printf(sc->sc_dev, "device timeout\n");
4986 #ifdef IWM_DEBUG
4987 iwm_nic_error(sc);
4988 #endif
4989 ieee80211_restart_all(ic);
4990 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4991 return;
4992 }
4993 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4994 }
4995 }
4996
4997 static void
4998 iwm_parent(struct ieee80211com *ic)
4999 {
5000 struct iwm_softc *sc = ic->ic_softc;
5001 int startall = 0;
5002 int rfkill = 0;
5003
5004 IWM_LOCK(sc);
5005 if (ic->ic_nrunning > 0) {
5006 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5007 iwm_init(sc);
5008 rfkill = iwm_check_rfkill(sc);
5009 if (!rfkill)
5010 startall = 1;
5011 }
5012 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5013 iwm_stop(sc);
5014 IWM_UNLOCK(sc);
5015 if (startall)
5016 ieee80211_start_all(ic);
5017 else if (rfkill)
5018 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5019 }
5020
5021 static void
5022 iwm_rftoggle_task(void *arg, int npending __unused)
5023 {
5024 struct iwm_softc *sc = arg;
5025 struct ieee80211com *ic = &sc->sc_ic;
5026 int rfkill;
5027
5028 IWM_LOCK(sc);
5029 rfkill = iwm_check_rfkill(sc);
5030 IWM_UNLOCK(sc);
5031 if (rfkill) {
5032 device_printf(sc->sc_dev,
5033 "%s: rfkill switch, disabling interface\n", __func__);
5034 ieee80211_suspend_all(ic);
5035 ieee80211_notify_radio(ic, 0);
5036 } else {
5037 device_printf(sc->sc_dev,
5038 "%s: rfkill cleared, re-enabling interface\n", __func__);
5039 ieee80211_resume_all(ic);
5040 ieee80211_notify_radio(ic, 1);
5041 }
5042 }
5043
5044 /*
5045 * The interrupt side of things
5046 */
5047
5048 /*
5049 * error dumping routines are from iwlwifi/mvm/utils.c
5050 */
5051
5052 /*
5053 * Note: This structure is read from the device with IO accesses,
5054 * and the reading already does the endian conversion. As it is
5055 * read with uint32_t-sized accesses, any members with a different size
5056 * need to be ordered correctly though!
5057 */
5058 struct iwm_error_event_table {
5059 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5060 uint32_t error_id; /* type of error */
5061 uint32_t trm_hw_status0; /* TRM HW status */
5062 uint32_t trm_hw_status1; /* TRM HW status */
5063 uint32_t blink2; /* branch link */
5064 uint32_t ilink1; /* interrupt link */
5065 uint32_t ilink2; /* interrupt link */
5066 uint32_t data1; /* error-specific data */
5067 uint32_t data2; /* error-specific data */
5068 uint32_t data3; /* error-specific data */
5069 uint32_t bcon_time; /* beacon timer */
5070 uint32_t tsf_low; /* network timestamp function timer */
5071 uint32_t tsf_hi; /* network timestamp function timer */
5072 uint32_t gp1; /* GP1 timer register */
5073 uint32_t gp2; /* GP2 timer register */
5074 uint32_t fw_rev_type; /* firmware revision type */
5075 uint32_t major; /* uCode version major */
5076 uint32_t minor; /* uCode version minor */
5077 uint32_t hw_ver; /* HW Silicon version */
5078 uint32_t brd_ver; /* HW board version */
5079 uint32_t log_pc; /* log program counter */
5080 uint32_t frame_ptr; /* frame pointer */
5081 uint32_t stack_ptr; /* stack pointer */
5082 uint32_t hcmd; /* last host command header */
5083 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5084 * rxtx_flag */
5085 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5086 * host_flag */
5087 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5088 * enc_flag */
5089 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5090 * time_flag */
5091 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5092 * wico interrupt */
5093 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
5094 uint32_t wait_event; /* wait event() caller address */
5095 uint32_t l2p_control; /* L2pControlField */
5096 uint32_t l2p_duration; /* L2pDurationField */
5097 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5098 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5099 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5100 * (LMPM_PMG_SEL) */
5101 uint32_t u_timestamp; /* date and time of the firmware
5102 * compilation */
5103 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5104 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5105
5106 /*
5107 * UMAC error struct - relevant starting from family 8000 chip.
5108 * Note: This structure is read from the device with IO accesses,
5109 * and the reading already does the endian conversion. As it is
5110 * read with u32-sized accesses, any members with a different size
5111 * need to be ordered correctly though!
5112 */
5113 struct iwm_umac_error_event_table {
5114 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5115 uint32_t error_id; /* type of error */
5116 uint32_t blink1; /* branch link */
5117 uint32_t blink2; /* branch link */
5118 uint32_t ilink1; /* interrupt link */
5119 uint32_t ilink2; /* interrupt link */
5120 uint32_t data1; /* error-specific data */
5121 uint32_t data2; /* error-specific data */
5122 uint32_t data3; /* error-specific data */
5123 uint32_t umac_major;
5124 uint32_t umac_minor;
5125 uint32_t frame_pointer; /* core register 27*/
5126 uint32_t stack_pointer; /* core register 28 */
5127 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5128 uint32_t nic_isr_pref; /* ISR status register */
5129 } __packed;
5130
5131 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5132 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
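/*
 * With these sizes, the "ERROR_START_OFFSET <= table.valid *
 * ERROR_ELEM_SIZE" checks below reduce to table.valid >= 1, since
 * 4 <= valid * 28 holds for any nonzero valid.
 */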
5133
5134 #ifdef IWM_DEBUG
5135 struct {
5136 const char *name;
5137 uint8_t num;
5138 } advanced_lookup[] = {
5139 { "NMI_INTERRUPT_WDG", 0x34 },
5140 { "SYSASSERT", 0x35 },
5141 { "UCODE_VERSION_MISMATCH", 0x37 },
5142 { "BAD_COMMAND", 0x38 },
5143 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5144 { "FATAL_ERROR", 0x3D },
5145 { "NMI_TRM_HW_ERR", 0x46 },
5146 { "NMI_INTERRUPT_TRM", 0x4C },
5147 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5148 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5149 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5150 { "NMI_INTERRUPT_HOST", 0x66 },
5151 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5152 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5153 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5154 { "ADVANCED_SYSASSERT", 0 },
5155 };
5156
5157 static const char *
5158 iwm_desc_lookup(uint32_t num)
5159 {
5160 int i;
5161
5162 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5163 if (advanced_lookup[i].num == num)
5164 return advanced_lookup[i].name;
5165
5166 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5167 return advanced_lookup[i].name;
5168 }
5169
5170 static void
5171 iwm_nic_umac_error(struct iwm_softc *sc)
5172 {
5173 struct iwm_umac_error_event_table table;
5174 uint32_t base;
5175
5176 base = sc->umac_error_event_table;
5177
5178 if (base < 0x800000) {
5179 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5180 base);
5181 return;
5182 }
5183
5184 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5185 device_printf(sc->sc_dev, "reading errlog failed\n");
5186 return;
5187 }
5188
5189 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5190 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5191 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5192 sc->sc_flags, table.valid);
5193 }
5194
5195 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5196 iwm_desc_lookup(table.error_id));
5197 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5198 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5199 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5200 table.ilink1);
5201 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5202 table.ilink2);
5203 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5204 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5205 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5206 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5207 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5208 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5209 table.frame_pointer);
5210 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5211 table.stack_pointer);
5212 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5213 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5214 table.nic_isr_pref);
5215 }
5216
5217 /*
5218 * Support for dumping the error log seemed like a good idea ...
5219 * but it's mostly hex junk and the only sensible thing is the
5220 * hw/ucode revision (which we know anyway). Since it's here,
5221 * I'll just leave it in, just in case e.g. the Intel guys want to
5222 * help us decipher some "ADVANCED_SYSASSERT" later.
5223 */
5224 static void
5225 iwm_nic_error(struct iwm_softc *sc)
5226 {
5227 struct iwm_error_event_table table;
5228 uint32_t base;
5229
5230 device_printf(sc->sc_dev, "dumping device error log\n");
5231 base = sc->error_event_table[0];
5232 if (base < 0x800000) {
5233 device_printf(sc->sc_dev,
5234 "Invalid error log pointer 0x%08x\n", base);
5235 return;
5236 }
5237
5238 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5239 device_printf(sc->sc_dev, "reading errlog failed\n");
5240 return;
5241 }
5242
5243 if (!table.valid) {
5244 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5245 return;
5246 }
5247
5248 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5249 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5250 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5251 sc->sc_flags, table.valid);
5252 }
5253
5254 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5255 iwm_desc_lookup(table.error_id));
5256 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5257 table.trm_hw_status0);
5258 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5259 table.trm_hw_status1);
5260 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5261 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5262 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5263 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5264 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5265 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5266 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5267 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5268 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5269 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5270 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5271 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5272 table.fw_rev_type);
5273 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5274 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5275 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5276 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5277 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5278 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5279 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5280 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5281 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5282 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5283 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5284 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5285 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5286 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5287 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5288 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5289 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5290 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5291 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5292
5293 if (sc->umac_error_event_table)
5294 iwm_nic_umac_error(sc);
5295 }
5296 #endif
5297
5298 static void
5299 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5300 {
5301 struct ieee80211com *ic = &sc->sc_ic;
5302 struct iwm_cmd_response *cresp;
5303 struct mbuf *m1;
5304 uint32_t offset = 0;
5305 uint32_t maxoff = IWM_RBUF_SIZE;
5306 uint32_t nextoff;
5307 boolean_t stolen = FALSE;
5308
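/*
 * The firmware may pack several response packets into a single RX
 * buffer; HAVEROOM() checks whether yet another packet header could
 * still fit before the end of the buffer.
 */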
5309 #define HAVEROOM(a) \
5310 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5311
5312 while (HAVEROOM(offset)) {
5313 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5314 offset);
5315 int qid, idx, code, len;
5316
5317 qid = pkt->hdr.qid;
5318 idx = pkt->hdr.idx;
5319
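/*
 * IWM_WIDE_ID() folds the header flags byte (which carries the
 * command group) together with the 8-bit opcode, so notifications
 * from non-legacy groups can be matched below.
 */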
5320 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5321
5322 /*
5323 * We randomly get these from the firmware; no idea why. They
5324 * at least seem harmless, so just ignore them for now.
5325 */
5326 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5327 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5328 break;
5329 }
5330
5331 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5332 "rx packet qid=%d idx=%d type=%x\n",
5333 qid & ~0x80, pkt->hdr.idx, code);
5334
5335 len = iwm_rx_packet_len(pkt);
5336 len += sizeof(uint32_t); /* account for status word */
5337 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5338
5339 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5340
5341 switch (code) {
5342 case IWM_REPLY_RX_PHY_CMD:
5343 iwm_rx_rx_phy_cmd(sc, pkt);
5344 break;
5345
5346 case IWM_REPLY_RX_MPDU_CMD: {
5347 /*
5348 * If this is the last frame in the RX buffer, we
5349 * can directly feed the mbuf to the sharks here.
5350 */
5351 struct iwm_rx_packet *nextpkt = mtodoff(m,
5352 struct iwm_rx_packet *, nextoff);
5353 if (!HAVEROOM(nextoff) ||
5354 (nextpkt->hdr.code == 0 &&
5355 (nextpkt->hdr.qid & ~0x80) == 0 &&
5356 nextpkt->hdr.idx == 0) ||
5357 (nextpkt->len_n_flags ==
5358 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5359 if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5360 stolen = FALSE;
5361 /* Make sure we abort the loop */
5362 nextoff = maxoff;
5363 }
5364 break;
5365 }
5366
5367 /*
5368 * Use m_copym instead of m_split, because that
5369 * makes it easier to keep a valid rx buffer in
5370 * the ring, when iwm_rx_mpdu() fails.
5371 *
5372 * We need to start m_copym() at offset 0, to get the
5373 * M_PKTHDR flag preserved.
5374 */
5375 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5376 if (m1) {
5377 if (iwm_rx_mpdu(sc, m1, offset, stolen))
5378 stolen = TRUE;
5379 else
5380 m_freem(m1);
5381 }
5382 break;
5383 }
5384
5385 case IWM_TX_CMD:
5386 iwm_rx_tx_cmd(sc, pkt);
5387 break;
5388
5389 case IWM_MISSED_BEACONS_NOTIFICATION: {
5390 struct iwm_missed_beacons_notif *resp;
5391 int missed;
5392
5393 /* XXX look at mac_id to determine interface ID */
5394 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5395
5396 resp = (void *)pkt->data;
5397 missed = le32toh(resp->consec_missed_beacons);
5398
5399 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5400 "%s: MISSED_BEACON: mac_id=%d, "
5401 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5402 "num_rx=%d\n",
5403 __func__,
5404 le32toh(resp->mac_id),
5405 le32toh(resp->consec_missed_beacons_since_last_rx),
5406 le32toh(resp->consec_missed_beacons),
5407 le32toh(resp->num_expected_beacons),
5408 le32toh(resp->num_recvd_beacons));
5409
5410 /* Be paranoid */
5411 if (vap == NULL)
5412 break;
5413
5414 /* XXX no net80211 locking? */
5415 if (vap->iv_state == IEEE80211_S_RUN &&
5416 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5417 if (missed > vap->iv_bmissthreshold) {
5418 /* XXX bad locking; turn into task */
5419 IWM_UNLOCK(sc);
5420 ieee80211_beacon_miss(ic);
5421 IWM_LOCK(sc);
5422 }
5423 }
5424
5425 break;
5426 }
5427
5428 case IWM_MFUART_LOAD_NOTIFICATION:
5429 break;
5430
5431 case IWM_ALIVE:
5432 break;
5433
5434 case IWM_CALIB_RES_NOTIF_PHY_DB:
5435 break;
5436
5437 case IWM_STATISTICS_NOTIFICATION:
5438 iwm_handle_rx_statistics(sc, pkt);
5439 break;
5440
5441 case IWM_NVM_ACCESS_CMD:
5442 case IWM_MCC_UPDATE_CMD:
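/*
 * These replies are awaited synchronously; sc_wantresp encodes
 * the queue id and index of the slot the issuing thread sleeps on.
 */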
5443 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5444 memcpy(sc->sc_cmd_resp,
5445 pkt, sizeof(sc->sc_cmd_resp));
5446 }
5447 break;
5448
5449 case IWM_MCC_CHUB_UPDATE_CMD: {
5450 struct iwm_mcc_chub_notif *notif;
5451 notif = (void *)pkt->data;
5452
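/*
 * The MCC is a two-letter country code packed into a 16-bit word,
 * first letter in the high byte; unpack it into a NUL-terminated
 * string.
 */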
5453 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5454 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5455 sc->sc_fw_mcc[2] = '\0';
5456 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5457 "fw source %d sent CC '%s'\n",
5458 notif->source_id, sc->sc_fw_mcc);
5459 break;
5460 }
5461
5462 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5463 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5464 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5465 struct iwm_dts_measurement_notif_v1 *notif;
5466
5467 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5468 device_printf(sc->sc_dev,
5469 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5470 break;
5471 }
5472 notif = (void *)pkt->data;
5473 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5474 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5475 notif->temp);
5476 break;
5477 }
5478
5479 case IWM_PHY_CONFIGURATION_CMD:
5480 case IWM_TX_ANT_CONFIGURATION_CMD:
5481 case IWM_ADD_STA:
5482 case IWM_MAC_CONTEXT_CMD:
5483 case IWM_REPLY_SF_CFG_CMD:
5484 case IWM_POWER_TABLE_CMD:
5485 case IWM_LTR_CONFIG:
5486 case IWM_PHY_CONTEXT_CMD:
5487 case IWM_BINDING_CONTEXT_CMD:
5488 case IWM_TIME_EVENT_CMD:
5489 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5490 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5491 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5492 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5493 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5494 case IWM_REPLY_BEACON_FILTERING_CMD:
5495 case IWM_MAC_PM_POWER_TABLE:
5496 case IWM_TIME_QUOTA_CMD:
5497 case IWM_REMOVE_STA:
5498 case IWM_TXPATH_FLUSH:
5499 case IWM_LQ_CMD:
5500 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5501 IWM_FW_PAGING_BLOCK_CMD):
5502 case IWM_BT_CONFIG:
5503 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5504 cresp = (void *)pkt->data;
5505 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5506 memcpy(sc->sc_cmd_resp,
5507 pkt, sizeof(*pkt)+sizeof(*cresp));
5508 }
5509 break;
5510
5511 /* ignore */
5512 case IWM_PHY_DB_CMD:
5513 break;
5514
5515 case IWM_INIT_COMPLETE_NOTIF:
5516 break;
5517
5518 case IWM_SCAN_OFFLOAD_COMPLETE:
5519 iwm_rx_lmac_scan_complete_notif(sc, pkt);
5520 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5521 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5522 ieee80211_runtask(ic, &sc->sc_es_task);
5523 }
5524 break;
5525
5526 case IWM_SCAN_ITERATION_COMPLETE: {
5527 break;
5528 }
5529
5530 case IWM_SCAN_COMPLETE_UMAC:
5531 iwm_rx_umac_scan_complete_notif(sc, pkt);
5532 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5533 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5534 ieee80211_runtask(ic, &sc->sc_es_task);
5535 }
5536 break;
5537
5538 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5539 #ifdef IWM_DEBUG
5540 struct iwm_umac_scan_iter_complete_notif *notif;
5541 notif = (void *)pkt->data;
5542
5543 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5544 "complete, status=0x%x, %d channels scanned\n",
5545 notif->status, notif->scanned_channels);
5546 #endif
5547 break;
5548 }
5549
5550 case IWM_REPLY_ERROR: {
5551 struct iwm_error_resp *resp;
5552 resp = (void *)pkt->data;
5553
5554 device_printf(sc->sc_dev,
5555 "firmware error 0x%x, cmd 0x%x\n",
5556 le32toh(resp->error_type),
5557 resp->cmd_id);
5558 break;
5559 }
5560
5561 case IWM_TIME_EVENT_NOTIFICATION:
5562 iwm_rx_time_event_notif(sc, pkt);
5563 break;
5564
5565 /*
5566 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5567 * messages. Just ignore them for now.
5568 */
5569 case IWM_DEBUG_LOG_MSG:
5570 break;
5571
5572 case IWM_MCAST_FILTER_CMD:
5573 break;
5574
5575 case IWM_SCD_QUEUE_CFG: {
5576 #ifdef IWM_DEBUG
5577 struct iwm_scd_txq_cfg_rsp *rsp;
5578 rsp = (void *)pkt->data;
5579
5580 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5581 "queue cfg token=0x%x sta_id=%d "
5582 "tid=%d scd_queue=%d\n",
5583 rsp->token, rsp->sta_id, rsp->tid,
5584 rsp->scd_queue);
5585 #endif
5586 break;
5587 }
5588
5589 default:
5590 device_printf(sc->sc_dev,
5591 "code %x, frame %d/%d %x unhandled\n",
5592 code, qid & ~0x80, idx, pkt->len_n_flags);
5593 break;
5594 }
5595
5596 /*
5597 * Why test bit 0x80? The Linux driver:
5598 *
5599 * There is one exception: uCode sets bit 15 when it
5600 * originates the response/notification, i.e. when the
5601 * response/notification is not a direct response to a
5602 * command sent by the driver. For example, uCode issues
5603 * IWM_REPLY_RX when it sends a received frame to the driver;
5604 * it is not a direct response to any driver command.
5605 *
5606 * Ok, so since when is 7 == 15? Well, the Linux driver
5607 * uses a slightly different format for pkt->hdr, and "qid"
5608 * is actually the upper byte of a two-byte field.
5609 */
5610 if (!(qid & (1 << 7)))
5611 iwm_cmd_done(sc, pkt);
5612
5613 offset = nextoff;
5614 }
5615 if (stolen)
5616 m_freem(m);
5617 #undef HAVEROOM
5618 }
5619
5620 /*
5621 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5622 * Basic structure from if_iwn
5623 */
5624 static void
5625 iwm_notif_intr(struct iwm_softc *sc)
5626 {
5627 int count;
5628 uint32_t wreg;
5629 uint16_t hw;
5630
5631 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5632 BUS_DMASYNC_POSTREAD);
5633
5634 if (sc->cfg->mqrx_supported) {
5635 count = IWM_RX_MQ_RING_COUNT;
5636 wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5637 } else {
5638 count = IWM_RX_LEGACY_RING_COUNT;
5639 wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5640 }
5641
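/*
 * closed_rb_num is the index of the most recently closed receive
 * buffer; only the low 12 bits are significant.
 */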
5642 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5643
5644 /*
5645 * Process responses
5646 */
5647 while (sc->rxq.cur != hw) {
5648 struct iwm_rx_ring *ring = &sc->rxq;
5649 struct iwm_rx_data *data = &ring->data[ring->cur];
5650
5651 bus_dmamap_sync(ring->data_dmat, data->map,
5652 BUS_DMASYNC_POSTREAD);
5653
5654 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5655 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5656 iwm_handle_rxb(sc, data->m);
5657
5658 ring->cur = (ring->cur + 1) % count;
5659 }
5660
5661 /*
5662 * Tell the firmware that it can reuse the ring entries that
5663 * we have just processed.
5664 * The hardware seems to get upset unless we align the
5665 * write down to a multiple of 8.
5666 */
5667 hw = (hw == 0) ? count - 1 : hw - 1;
5668 IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5669 }
5670
5671 static void
5672 iwm_intr(void *arg)
5673 {
5674 struct iwm_softc *sc = arg;
5675 int handled = 0;
5676 int r1, r2;
5677 int isperiodic = 0;
5678
5679 IWM_LOCK(sc);
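/*
 * Disable interrupts while we service this one; they are restored
 * by iwm_restore_interrupts() on the way out.
 */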
5680 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5681
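/*
 * With ICT the device DMAs its interrupt causes into a table in
 * host memory, which avoids a slow register read in the hot path.
 */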
5682 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5683 uint32_t *ict = sc->ict_dma.vaddr;
5684 int tmp;
5685
5686 tmp = htole32(ict[sc->ict_cur]);
5687 if (!tmp)
5688 goto out_ena;
5689
5690 /*
5691 * ok, there was something. keep plowing until we have all.
5692 */
5693 r1 = r2 = 0;
5694 while (tmp) {
5695 r1 |= tmp;
5696 ict[sc->ict_cur] = 0;
5697 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5698 tmp = htole32(ict[sc->ict_cur]);
5699 }
5700
5701 /* this is where the fun begins. don't ask */
5702 if (r1 == 0xffffffff)
5703 r1 = 0;
5704
5705 /* i am not expected to understand this */
5706 if (r1 & 0xc0000)
5707 r1 |= 0x8000;
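/*
 * Fold the ICT byte lanes back into the IWM_CSR_INT bit layout
 * expected by the handlers below.
 */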
5708 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5709 } else {
5710 r1 = IWM_READ(sc, IWM_CSR_INT);
5711 /* "hardware gone" (where, fishing?) */
5712 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5713 goto out;
5714 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5715 }
5716 if (r1 == 0 && r2 == 0) {
5717 goto out_ena;
5718 }
5719
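/* Ack the interrupt causes we saw, plus any we never enabled. */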
5720 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5721
5722 /* Safely ignore these bits for debug checks below */
5723 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5724
5725 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5726 int i;
5727 struct ieee80211com *ic = &sc->sc_ic;
5728 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5729
5730 #ifdef IWM_DEBUG
5731 iwm_nic_error(sc);
5732 #endif
5733 /* Dump driver status (TX and RX rings) while we're here. */
5734 device_printf(sc->sc_dev, "driver status:\n");
5735 for (i = 0; i < IWM_MAX_QUEUES; i++) {
5736 struct iwm_tx_ring *ring = &sc->txq[i];
5737 device_printf(sc->sc_dev,
5738 " tx ring %2d: qid=%-2d cur=%-3d "
5739 "queued=%-3d\n",
5740 i, ring->qid, ring->cur, ring->queued);
5741 }
5742 device_printf(sc->sc_dev,
5743 " rx ring: cur=%d\n", sc->rxq.cur);
5744 device_printf(sc->sc_dev,
5745 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5746
5747 /* Reset our firmware state tracking. */
5748 sc->sc_firmware_state = 0;
5749 /* Don't stop the device; just do a VAP restart */
5750 IWM_UNLOCK(sc);
5751
5752 if (vap == NULL) {
5753 printf("%s: null vap\n", __func__);
5754 return;
5755 }
5756
5757 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5758 "restarting\n", __func__, vap->iv_state);
5759
5760 ieee80211_restart_all(ic);
5761 return;
5762 }
5763
5764 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5765 handled |= IWM_CSR_INT_BIT_HW_ERR;
5766 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5767 iwm_stop(sc);
5768 goto out;
5769 }
5770
5771 /* firmware chunk loaded */
5772 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5773 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5774 handled |= IWM_CSR_INT_BIT_FH_TX;
5775 sc->sc_fw_chunk_done = 1;
5776 wakeup(&sc->sc_fw);
5777 }
5778
5779 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5780 handled |= IWM_CSR_INT_BIT_RF_KILL;
5781 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5782 }
5783
5784 /*
5785 * The Linux driver uses periodic interrupts to avoid races.
5786 * We cargo-cult like it's going out of fashion.
5787 */
5788 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5789 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5790 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5791 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5792 IWM_WRITE_1(sc,
5793 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5794 isperiodic = 1;
5795 }
5796
5797 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5798 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5799 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5800
5801 iwm_notif_intr(sc);
5802
5803 /* enable periodic interrupt, see above */
5804 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5805 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5806 IWM_CSR_INT_PERIODIC_ENA);
5807 }
5808
5809 if (__predict_false(r1 & ~handled))
5810 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5811 "%s: unhandled interrupts: %x\n", __func__, r1);
5812 out_ena:
5813 iwm_restore_interrupts(sc);
5814 out:
5815 IWM_UNLOCK(sc);
5816 return;
5817 }
5818
5819 /*
5820 * Autoconf glue-sniffing
5821 */
5822 #define PCI_VENDOR_INTEL 0x8086
5823 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5824 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5825 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5826 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5827 #define PCI_PRODUCT_INTEL_WL_3168_1 0x24fb
5828 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5829 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5830 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5831 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5832 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5833 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5834 #define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd
5835 #define PCI_PRODUCT_INTEL_WL_9560_1 0x9df0
5836 #define PCI_PRODUCT_INTEL_WL_9560_2 0xa370
5837 #define PCI_PRODUCT_INTEL_WL_9560_3 0x31dc
5838 #define PCI_PRODUCT_INTEL_WL_9260_1 0x2526
5839
5840 static const struct iwm_devices {
5841 uint16_t device;
5842 const struct iwm_cfg *cfg;
5843 } iwm_devices[] = {
5844 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5845 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5846 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5847 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5848 { PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5849 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5850 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5851 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5852 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5853 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5854 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5855 { PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5856 { PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5857 { PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5858 { PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5859 { PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5860 };
5861
5862 static int
5863 iwm_probe(device_t dev)
5864 {
5865 int i;
5866
5867 for (i = 0; i < nitems(iwm_devices); i++) {
5868 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5869 pci_get_device(dev) == iwm_devices[i].device) {
5870 device_set_desc(dev, iwm_devices[i].cfg->name);
5871 return (BUS_PROBE_DEFAULT);
5872 }
5873 }
5874
5875 return (ENXIO);
5876 }
5877
5878 static int
5879 iwm_dev_check(device_t dev)
5880 {
5881 struct iwm_softc *sc;
5882 uint16_t devid;
5883 int i;
5884
5885 sc = device_get_softc(dev);
5886
5887 devid = pci_get_device(dev);
5888 for (i = 0; i < nitems(iwm_devices); i++) {
5889 if (iwm_devices[i].device == devid) {
5890 sc->cfg = iwm_devices[i].cfg;
5891 return (0);
5892 }
5893 }
5894 device_printf(dev, "unknown adapter type\n");
5895 return ENXIO;
5896 }
5897
5898 /* PCI registers */
5899 #define PCI_CFG_RETRY_TIMEOUT 0x041
5900
5901 static int
5902 iwm_pci_attach(device_t dev)
5903 {
5904 struct iwm_softc *sc;
5905 int count, error, rid;
5906 uint16_t reg;
5907
5908 sc = device_get_softc(dev);
5909
5910 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5911 * PCI Tx retries from interfering with C3 CPU state. */
5912 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5913
5914 /* Enable bus-mastering and hardware bug workaround. */
5915 pci_enable_busmaster(dev);
5916 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5917 /* if !MSI */
5918 if (reg & PCIM_STATUS_INTxSTATE) {
5919 reg &= ~PCIM_STATUS_INTxSTATE;
5920 }
5921 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5922
5923 rid = PCIR_BAR(0);
5924 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5925 RF_ACTIVE);
5926 if (sc->sc_mem == NULL) {
5927 device_printf(sc->sc_dev, "can't map mem space\n");
5928 return (ENXIO);
5929 }
5930 sc->sc_st = rman_get_bustag(sc->sc_mem);
5931 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5932
5933 /* Install interrupt handler. */
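/*
 * Prefer a single MSI vector (resource ID 1); fall back to the
 * shareable legacy INTx line at resource ID 0 if MSI allocation
 * fails.
 */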
5934 count = 1;
5935 rid = 0;
5936 if (pci_alloc_msi(dev, &count) == 0)
5937 rid = 1;
5938 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5939 (rid != 0 ? 0 : RF_SHAREABLE));
5940 if (sc->sc_irq == NULL) {
5941 device_printf(dev, "can't map interrupt\n");
5942 return (ENXIO);
5943 }
5944 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5945 NULL, iwm_intr, sc, &sc->sc_ih);
5946 if (error != 0) {
5947 device_printf(dev, "can't establish interrupt");
5948 return (error);
5949 }
5950 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5951
5952 return (0);
5953 }
5954
5955 static void
5956 iwm_pci_detach(device_t dev)
5957 {
5958 struct iwm_softc *sc = device_get_softc(dev);
5959
5960 if (sc->sc_irq != NULL) {
5961 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5962 bus_release_resource(dev, SYS_RES_IRQ,
5963 rman_get_rid(sc->sc_irq), sc->sc_irq);
5964 pci_release_msi(dev);
5965 }
5966 if (sc->sc_mem != NULL)
5967 bus_release_resource(dev, SYS_RES_MEMORY,
5968 rman_get_rid(sc->sc_mem), sc->sc_mem);
5969 }
5970
5971 static int
5972 iwm_attach(device_t dev)
5973 {
5974 struct iwm_softc *sc = device_get_softc(dev);
5975 struct ieee80211com *ic = &sc->sc_ic;
5976 int error;
5977 int txq_i, i;
5978
5979 sc->sc_dev = dev;
5980 sc->sc_attached = 1;
5981 IWM_LOCK_INIT(sc);
5982 mbufq_init(&sc->sc_snd, ifqmaxlen);
5983 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5984 callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5985 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5986 TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
5987
5988 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5989 taskqueue_thread_enqueue, &sc->sc_tq);
5990 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5991 if (error != 0) {
5992 device_printf(dev, "can't start taskq thread, error %d\n",
5993 error);
5994 goto fail;
5995 }
5996
5997 error = iwm_dev_check(dev);
5998 if (error != 0)
5999 goto fail;
6000
6001 sc->sc_notif_wait = iwm_notification_wait_init(sc);
6002 if (sc->sc_notif_wait == NULL) {
6003 device_printf(dev, "failed to init notification wait struct\n");
6004 goto fail;
6005 }
6006
6007 sc->sf_state = IWM_SF_UNINIT;
6008
6009 /* Init phy db */
6010 sc->sc_phy_db = iwm_phy_db_init(sc);
6011 if (!sc->sc_phy_db) {
6012 device_printf(dev, "Cannot init phy_db\n");
6013 goto fail;
6014 }
6015
6016 /* Consider EBS successful until the firmware states otherwise. */
6017 sc->last_ebs_successful = TRUE;
6018
6019 /* PCI attach */
6020 error = iwm_pci_attach(dev);
6021 if (error != 0)
6022 goto fail;
6023
6024 sc->sc_wantresp = -1;
6025
6026 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6027 /*
6028 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6029 * changed, and the revision step now also includes bits 0-1 (no more
6030 * "dash" value). To keep hw_rev backwards compatible, we store it
6031 * in the old format.
6032 */
6033 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6034 int ret;
6035 uint32_t hw_step;
6036
6037 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6038 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6039
6040 if (iwm_prepare_card_hw(sc) != 0) {
6041 device_printf(dev, "could not initialize hardware\n");
6042 goto fail;
6043 }
6044
6045 /*
6046 * To recognize a C step, the driver should read the
6047 * chip version id located at the AUX bus MISC address.
6048 */
6049 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6050 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6051 DELAY(2);
6052
6053 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6054 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6055 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6056 25000);
6057 if (!ret) {
6058 device_printf(sc->sc_dev,
6059 "Failed to wake up the nic\n");
6060 goto fail;
6061 }
6062
6063 if (iwm_nic_lock(sc)) {
6064 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6065 hw_step |= IWM_ENABLE_WFPM;
6066 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6067 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6068 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6069 if (hw_step == 0x3)
6070 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6071 (IWM_SILICON_C_STEP << 2);
6072 iwm_nic_unlock(sc);
6073 } else {
6074 device_printf(sc->sc_dev, "Failed to lock the nic\n");
6075 goto fail;
6076 }
6077 }
6078
6079 /* special-case 7265D, it has the same PCI IDs. */
6080 if (sc->cfg == &iwm7265_cfg &&
6081 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6082 sc->cfg = &iwm7265d_cfg;
6083 }
6084
6085 /* Allocate DMA memory for firmware transfers. */
6086 if ((error = iwm_alloc_fwmem(sc)) != 0) {
6087 device_printf(dev, "could not allocate memory for firmware\n");
6088 goto fail;
6089 }
6090
6091 /* Allocate "Keep Warm" page. */
6092 if ((error = iwm_alloc_kw(sc)) != 0) {
6093 device_printf(dev, "could not allocate keep warm page\n");
6094 goto fail;
6095 }
6096
6097 /* We use ICT interrupts */
6098 if ((error = iwm_alloc_ict(sc)) != 0) {
6099 device_printf(dev, "could not allocate ICT table\n");
6100 goto fail;
6101 }
6102
6103 /* Allocate TX scheduler "rings". */
6104 if ((error = iwm_alloc_sched(sc)) != 0) {
6105 device_printf(dev, "could not allocate TX scheduler rings\n");
6106 goto fail;
6107 }
6108
6109 /* Allocate TX rings */
6110 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6111 if ((error = iwm_alloc_tx_ring(sc,
6112 &sc->txq[txq_i], txq_i)) != 0) {
6113 device_printf(dev,
6114 "could not allocate TX ring %d\n",
6115 txq_i);
6116 goto fail;
6117 }
6118 }
6119
6120 /* Allocate RX ring. */
6121 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6122 device_printf(dev, "could not allocate RX ring\n");
6123 goto fail;
6124 }
6125
6126 /* Clear pending interrupts. */
6127 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6128
6129 ic->ic_softc = sc;
6130 ic->ic_name = device_get_nameunit(sc->sc_dev);
6131 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6132 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6133
6134 /* Set device capabilities. */
6135 ic->ic_caps =
6136 IEEE80211_C_STA |
6137 IEEE80211_C_WPA | /* WPA/RSN */
6138 IEEE80211_C_WME |
6139 IEEE80211_C_PMGT |
6140 IEEE80211_C_SHSLOT | /* short slot time supported */
6141 IEEE80211_C_SHPREAMBLE /* short preamble supported */
6142 // IEEE80211_C_BGSCAN /* capable of bg scanning */
6143 ;
6144 /* Advertise full-offload scanning */
6145 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6146 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6147 sc->sc_phyctxt[i].id = i;
6148 sc->sc_phyctxt[i].color = 0;
6149 sc->sc_phyctxt[i].ref = 0;
6150 sc->sc_phyctxt[i].channel = NULL;
6151 }
6152
6153 /* Default noise floor */
6154 sc->sc_noise = -96;
6155
6156 /* Max RSSI */
6157 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6158
6159 #ifdef IWM_DEBUG
6160 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6161 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6162 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6163 #endif
6164
6165 error = iwm_read_firmware(sc);
6166 if (error) {
6167 goto fail;
6168 } else if (sc->sc_fw.fw_fp == NULL) {
6169 /*
6170 * XXX Add a solution for properly deferring firmware load
6171 * during bootup.
6172 */
6173 goto fail;
6174 } else {
6175 sc->sc_preinit_hook.ich_func = iwm_preinit;
6176 sc->sc_preinit_hook.ich_arg = sc;
6177 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6178 device_printf(dev,
6179 "config_intrhook_establish failed\n");
6180 goto fail;
6181 }
6182 }
6183
6184 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6185 "<-%s\n", __func__);
6186
6187 return 0;
6188
6189 /* Free allocated memory if something failed during attachment. */
6190 fail:
6191 iwm_detach_local(sc, 0);
6192
6193 return ENXIO;
6194 }
6195
6196 static int
6197 iwm_is_valid_ether_addr(uint8_t *addr)
6198 {
6199 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6200
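/*
 * Reject group addresses (bit 0 of the first octet) and the
 * all-zeroes address.
 */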
6201 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6202 return (FALSE);
6203
6204 return (TRUE);
6205 }
6206
6207 static int
6208 iwm_wme_update(struct ieee80211com *ic)
6209 {
6210 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
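/* For example, ECWmin = 4 gives CWmin = 2^4 - 1 = 15 slots. */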
6211 struct iwm_softc *sc = ic->ic_softc;
6212 struct chanAccParams chp;
6213 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6214 struct iwm_vap *ivp = IWM_VAP(vap);
6215 struct iwm_node *in;
6216 struct wmeParams tmp[WME_NUM_AC];
6217 int aci, error;
6218
6219 if (vap == NULL)
6220 return (0);
6221
6222 ieee80211_wme_ic_getparams(ic, &chp);
6223
6224 IEEE80211_LOCK(ic);
6225 for (aci = 0; aci < WME_NUM_AC; aci++)
6226 tmp[aci] = chp.cap_wmeParams[aci];
6227 IEEE80211_UNLOCK(ic);
6228
6229 IWM_LOCK(sc);
6230 for (aci = 0; aci < WME_NUM_AC; aci++) {
6231 const struct wmeParams *ac = &tmp[aci];
6232 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6233 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6234 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6235 ivp->queue_params[aci].edca_txop =
6236 IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6237 }
6238 ivp->have_wme = TRUE;
6239 if (ivp->is_uploaded && vap->iv_bss != NULL) {
6240 in = IWM_NODE(vap->iv_bss);
6241 if (in->in_assoc) {
6242 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6243 device_printf(sc->sc_dev,
6244 "%s: failed to update MAC\n", __func__);
6245 }
6246 }
6247 }
6248 IWM_UNLOCK(sc);
6249
6250 return (0);
6251 #undef IWM_EXP2
6252 }
6253
6254 static void
6255 iwm_preinit(void *arg)
6256 {
6257 struct iwm_softc *sc = arg;
6258 device_t dev = sc->sc_dev;
6259 struct ieee80211com *ic = &sc->sc_ic;
6260 int error;
6261
6262 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6263 "->%s\n", __func__);
6264
6265 IWM_LOCK(sc);
6266 if ((error = iwm_start_hw(sc)) != 0) {
6267 device_printf(dev, "could not initialize hardware\n");
6268 IWM_UNLOCK(sc);
6269 goto fail;
6270 }
6271
6272 error = iwm_run_init_ucode(sc, 1);
6273 iwm_stop_device(sc);
6274 if (error) {
6275 IWM_UNLOCK(sc);
6276 goto fail;
6277 }
6278 device_printf(dev,
6279 "hw rev 0x%x, fw ver %s, address %s\n",
6280 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6281 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6282
6283 /* not all hardware can do 5GHz band */
6284 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6285 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6286 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6287 IWM_UNLOCK(sc);
6288
6289 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6290 ic->ic_channels);
6291
6292 /*
6293 * At this point we've committed - if we fail to do setup,
6294 * we now also have to tear down the net80211 state.
6295 */
6296 ieee80211_ifattach(ic);
6297 ic->ic_vap_create = iwm_vap_create;
6298 ic->ic_vap_delete = iwm_vap_delete;
6299 ic->ic_raw_xmit = iwm_raw_xmit;
6300 ic->ic_node_alloc = iwm_node_alloc;
6301 ic->ic_scan_start = iwm_scan_start;
6302 ic->ic_scan_end = iwm_scan_end;
6303 ic->ic_update_mcast = iwm_update_mcast;
6304 ic->ic_getradiocaps = iwm_init_channel_map;
6305 ic->ic_set_channel = iwm_set_channel;
6306 ic->ic_scan_curchan = iwm_scan_curchan;
6307 ic->ic_scan_mindwell = iwm_scan_mindwell;
6308 ic->ic_wme.wme_update = iwm_wme_update;
6309 ic->ic_parent = iwm_parent;
6310 ic->ic_transmit = iwm_transmit;
6311 iwm_radiotap_attach(sc);
6312 if (bootverbose)
6313 ieee80211_announce(ic);
6314
6315 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6316 "<-%s\n", __func__);
6317 config_intrhook_disestablish(&sc->sc_preinit_hook);
6318
6319 return;
6320 fail:
6321 config_intrhook_disestablish(&sc->sc_preinit_hook);
6322 iwm_detach_local(sc, 0);
6323 }
6324
6325 /*
6326 * Attach the interface to 802.11 radiotap.
6327 */
6328 static void
6329 iwm_radiotap_attach(struct iwm_softc *sc)
6330 {
6331 struct ieee80211com *ic = &sc->sc_ic;
6332
6333 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6334 "->%s begin\n", __func__);
6335 ieee80211_radiotap_attach(ic,
6336 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6337 IWM_TX_RADIOTAP_PRESENT,
6338 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6339 IWM_RX_RADIOTAP_PRESENT);
6340 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6341 "->%s end\n", __func__);
6342 }
6343
6344 static struct ieee80211vap *
6345 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6346 enum ieee80211_opmode opmode, int flags,
6347 const uint8_t bssid[IEEE80211_ADDR_LEN],
6348 const uint8_t mac[IEEE80211_ADDR_LEN])
6349 {
6350 struct iwm_vap *ivp;
6351 struct ieee80211vap *vap;
6352
6353 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6354 return NULL;
6355 ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6356 vap = &ivp->iv_vap;
6357 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6358 vap->iv_bmissthreshold = 10; /* override default */
6359 /* Override with driver methods. */
6360 ivp->iv_newstate = vap->iv_newstate;
6361 vap->iv_newstate = iwm_newstate;
6362
6363 ivp->id = IWM_DEFAULT_MACID;
6364 ivp->color = IWM_DEFAULT_COLOR;
6365
6366 ivp->have_wme = FALSE;
6367 ivp->ps_disabled = FALSE;
6368
6369 ieee80211_ratectl_init(vap);
6370 /* Complete setup. */
6371 ieee80211_vap_attach(vap, ieee80211_media_change,
6372 ieee80211_media_status, mac);
6373 ic->ic_opmode = opmode;
6374
6375 return vap;
6376 }
6377
6378 static void
6379 iwm_vap_delete(struct ieee80211vap *vap)
6380 {
6381 struct iwm_vap *ivp = IWM_VAP(vap);
6382
6383 ieee80211_ratectl_deinit(vap);
6384 ieee80211_vap_detach(vap);
6385 free(ivp, M_80211_VAP);
6386 }
6387
6388 static void
6389 iwm_xmit_queue_drain(struct iwm_softc *sc)
6390 {
6391 struct mbuf *m;
6392 struct ieee80211_node *ni;
6393
6394 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6395 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6396 ieee80211_free_node(ni);
6397 m_freem(m);
6398 }
6399 }
6400
6401 static void
6402 iwm_scan_start(struct ieee80211com *ic)
6403 {
6404 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6405 struct iwm_softc *sc = ic->ic_softc;
6406 int error;
6407
6408 IWM_LOCK(sc);
6409 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6410 /* This should not be possible */
6411 device_printf(sc->sc_dev,
6412 "%s: Previous scan not completed yet\n", __func__);
6413 }
6414 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6415 error = iwm_umac_scan(sc);
6416 else
6417 error = iwm_lmac_scan(sc);
6418 if (error != 0) {
6419 device_printf(sc->sc_dev, "could not initiate scan\n");
6420 IWM_UNLOCK(sc);
6421 ieee80211_cancel_scan(vap);
6422 } else {
6423 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6424 iwm_led_blink_start(sc);
6425 IWM_UNLOCK(sc);
6426 }
6427 }
6428
6429 static void
6430 iwm_scan_end(struct ieee80211com *ic)
6431 {
6432 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6433 struct iwm_softc *sc = ic->ic_softc;
6434
6435 IWM_LOCK(sc);
6436 iwm_led_blink_stop(sc);
6437 if (vap->iv_state == IEEE80211_S_RUN)
6438 iwm_led_enable(sc);
6439 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6440 /*
6441 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6442 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6443 * taskqueue.
6444 */
6445 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6446 iwm_scan_stop_wait(sc);
6447 }
6448 IWM_UNLOCK(sc);
6449
6450 /*
6451 * Cancel sc_es_task if it is still enqueued here, so that it
6452 * won't call ieee80211_scan_done after we have already started
6453 * the next scan.
6454 */
6455 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6456 }
6457
6458 static void
6459 iwm_update_mcast(struct ieee80211com *ic)
6460 {
6461 }
6462
6463 static void
6464 iwm_set_channel(struct ieee80211com *ic)
6465 {
6466 }
6467
6468 static void
6469 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6470 {
6471 }
6472
6473 static void
6474 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6475 {
6476 }
6477
6478 void
6479 iwm_init_task(void *arg1)
6480 {
6481 struct iwm_softc *sc = arg1;
6482
6483 IWM_LOCK(sc);
6484 while (sc->sc_flags & IWM_FLAG_BUSY)
6485 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6486 sc->sc_flags |= IWM_FLAG_BUSY;
6487 iwm_stop(sc);
6488 if (sc->sc_ic.ic_nrunning > 0)
6489 iwm_init(sc);
6490 sc->sc_flags &= ~IWM_FLAG_BUSY;
6491 wakeup(&sc->sc_flags);
6492 IWM_UNLOCK(sc);
6493 }
6494
6495 static int
6496 iwm_resume(device_t dev)
6497 {
6498 struct iwm_softc *sc = device_get_softc(dev);
6499 int do_reinit = 0;
6500
6501 /*
6502 * We disable the RETRY_TIMEOUT register (0x41) to keep
6503 * PCI Tx retries from interfering with C3 CPU state.
6504 */
6505 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6506
6507 if (!sc->sc_attached)
6508 return 0;
6509
6510 iwm_init_task(device_get_softc(dev));
6511
6512 IWM_LOCK(sc);
6513 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6514 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6515 do_reinit = 1;
6516 }
6517 IWM_UNLOCK(sc);
6518
6519 if (do_reinit)
6520 ieee80211_resume_all(&sc->sc_ic);
6521
6522 return 0;
6523 }
6524
6525 static int
6526 iwm_suspend(device_t dev)
6527 {
6528 int do_stop = 0;
6529 struct iwm_softc *sc = device_get_softc(dev);
6530
6531 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6532
6533 if (!sc->sc_attached)
6534 return (0);
6535
6536 ieee80211_suspend_all(&sc->sc_ic);
6537
6538 if (do_stop) {
6539 IWM_LOCK(sc);
6540 iwm_stop(sc);
6541 sc->sc_flags |= IWM_FLAG_SCANNING;
6542 IWM_UNLOCK(sc);
6543 }
6544
6545 return (0);
6546 }
6547
6548 static int
6549 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6550 {
6551 struct iwm_fw_info *fw = &sc->sc_fw;
6552 device_t dev = sc->sc_dev;
6553 int i;
6554
6555 if (!sc->sc_attached)
6556 return 0;
6557 sc->sc_attached = 0;
6558 if (do_net80211) {
6559 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6560 }
6561 iwm_stop_device(sc);
6562 taskqueue_drain_all(sc->sc_tq);
6563 taskqueue_free(sc->sc_tq);
6564 if (do_net80211) {
6565 IWM_LOCK(sc);
6566 iwm_xmit_queue_drain(sc);
6567 IWM_UNLOCK(sc);
6568 ieee80211_ifdetach(&sc->sc_ic);
6569 }
6570 callout_drain(&sc->sc_led_blink_to);
6571 callout_drain(&sc->sc_watchdog_to);
6572
6573 iwm_phy_db_free(sc->sc_phy_db);
6574 sc->sc_phy_db = NULL;
6575
6576 iwm_free_nvm_data(sc->nvm_data);
6577
6578 /* Free descriptor rings */
6579 iwm_free_rx_ring(sc, &sc->rxq);
6580 for (i = 0; i < nitems(sc->txq); i++)
6581 iwm_free_tx_ring(sc, &sc->txq[i]);
6582
6583 /* Free firmware */
6584 if (fw->fw_fp != NULL)
6585 iwm_fw_info_free(fw);
6586
6587 /* Free scheduler */
6588 iwm_dma_contig_free(&sc->sched_dma);
6589 iwm_dma_contig_free(&sc->ict_dma);
6590 iwm_dma_contig_free(&sc->kw_dma);
6591 iwm_dma_contig_free(&sc->fw_dma);
6592
6593 iwm_free_fw_paging(sc);
6594
6595 /* Finished with the hardware - detach things */
6596 iwm_pci_detach(dev);
6597
6598 if (sc->sc_notif_wait != NULL) {
6599 iwm_notification_wait_free(sc->sc_notif_wait);
6600 sc->sc_notif_wait = NULL;
6601 }
6602
6603 IWM_LOCK_DESTROY(sc);
6604
6605 return (0);
6606 }
6607
6608 static int
6609 iwm_detach(device_t dev)
6610 {
6611 struct iwm_softc *sc = device_get_softc(dev);
6612
6613 return (iwm_detach_local(sc, 1));
6614 }
6615
6616 static device_method_t iwm_pci_methods[] = {
6617 /* Device interface */
6618 DEVMETHOD(device_probe, iwm_probe),
6619 DEVMETHOD(device_attach, iwm_attach),
6620 DEVMETHOD(device_detach, iwm_detach),
6621 DEVMETHOD(device_suspend, iwm_suspend),
6622 DEVMETHOD(device_resume, iwm_resume),
6623
6624 DEVMETHOD_END
6625 };
6626
6627 static driver_t iwm_pci_driver = {
6628 "iwm",
6629 iwm_pci_methods,
6630 sizeof (struct iwm_softc)
6631 };
6632
6633 DRIVER_MODULE(iwm, pci, iwm_pci_driver, NULL, NULL);
6634 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6635 iwm_devices, nitems(iwm_devices));
6636 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6637 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6638 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6639