1 /*-
2 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3 */
4
5 /* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
6
7 /*
8 *
9 * Copyright (c) 2025 The FreeBSD Foundation
10 *
11 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12 * under sponsorship from the FreeBSD Foundation.
13 *
14 * Permission to use, copy, modify, and distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
26 */
27
28 /*-
29 * Copyright (c) 2024 Future Crew, LLC
30 * Author: Mikhail Pchelin <misha@FreeBSD.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45 /*
46 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47 * Author: Stefan Sperling <stsp@openbsd.org>
48 * Copyright (c) 2014 Fixup Software Ltd.
49 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50 *
51 * Permission to use, copy, modify, and distribute this software for any
52 * purpose with or without fee is hereby granted, provided that the above
53 * copyright notice and this permission notice appear in all copies.
54 *
55 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62 */
63
64 /*-
65 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66 * which were used as the reference documentation for this implementation.
67 *
68 ******************************************************************************
69 *
70 * This file is provided under a dual BSD/GPLv2 license. When using or
71 * redistributing this file, you may do so under either license.
72 *
73 * GPL LICENSE SUMMARY
74 *
75 * Copyright(c) 2017 Intel Deutschland GmbH
76 * Copyright(c) 2018 - 2019 Intel Corporation
77 *
78 * This program is free software; you can redistribute it and/or modify
79 * it under the terms of version 2 of the GNU General Public License as
80 * published by the Free Software Foundation.
81 *
82 * This program is distributed in the hope that it will be useful, but
83 * WITHOUT ANY WARRANTY; without even the implied warranty of
84 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
85 * General Public License for more details.
86 *
87 * BSD LICENSE
88 *
89 * Copyright(c) 2017 Intel Deutschland GmbH
90 * Copyright(c) 2018 - 2019 Intel Corporation
91 * All rights reserved.
92 *
93 * Redistribution and use in source and binary forms, with or without
94 * modification, are permitted provided that the following conditions
95 * are met:
96 *
97 * * Redistributions of source code must retain the above copyright
98 * notice, this list of conditions and the following disclaimer.
99 * * Redistributions in binary form must reproduce the above copyright
100 * notice, this list of conditions and the following disclaimer in
101 * the documentation and/or other materials provided with the
102 * distribution.
103 * * Neither the name Intel Corporation nor the names of its
104 * contributors may be used to endorse or promote products derived
105 * from this software without specific prior written permission.
106 *
107 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118 *
119 *****************************************************************************
120 */
121
122 /*-
123 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124 *
125 * Permission to use, copy, modify, and distribute this software for any
126 * purpose with or without fee is hereby granted, provided that the above
127 * copyright notice and this permission notice appear in all copies.
128 *
129 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136 */
137
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180
181 int iwx_himark = 224;
182 int iwx_lomark = 192;
183
184 #define IWX_FBSD_RSP_V3 3
185 #define IWX_FBSD_RSP_V4 4
186
187 #define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
188 #define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)
189
190 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
191 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195
196 #include <dev/iwx/if_iwx_debug.h>
197
198 #define PCI_CFG_RETRY_TIMEOUT 0x41
199
200 #define PCI_VENDOR_INTEL 0x8086
201 #define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
202 #define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
203 #define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
204 #define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
205 #define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
206 #define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
207 #define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
208 #define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
209 #define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
210 #define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
211 #define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
212 #define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
213 #define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
214 #define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
215 #define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
216 #define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
217 #define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
218
/*
 * PCI device IDs handled by this driver, paired with the marketing
 * name announced at attach time.  Matched against the PCI device ID
 * in iwx_probe().
 */
static const struct iwx_devices {
	uint16_t device;	/* PCI device ID */
	char *name;		/* human-readable adapter name */
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1,		"Wi-Fi 6 AX200"		},
	{ PCI_PRODUCT_INTEL_WL_22500_2,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_3,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_4,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_5,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_6,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_7,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_8,		"Wi-Fi 6 AX201"		},
	{ PCI_PRODUCT_INTEL_WL_22500_9,		"Wi-Fi 6 AX210"		},
	{ PCI_PRODUCT_INTEL_WL_22500_10,	"Wi-Fi 6 AX211"		},
	{ PCI_PRODUCT_INTEL_WL_22500_11,	"Wi-Fi 6 AX211"		},
	{ PCI_PRODUCT_INTEL_WL_22500_12,	"Wi-Fi 6 AX211"		},
	{ PCI_PRODUCT_INTEL_WL_22500_13,	"Wi-Fi 6 AX211"		},
	{ PCI_PRODUCT_INTEL_WL_22500_14,	"Wi-Fi 6 AX210"		},
	{ PCI_PRODUCT_INTEL_WL_22500_15,	"Wi-Fi 6 AX211"		},
	{ PCI_PRODUCT_INTEL_WL_22500_16,	"Wi-Fi 6 AX211"		},
	{ PCI_PRODUCT_INTEL_WL_22500_17,	"Wi-Fi 6 AX211"		},
};
241
/*
 * Channel numbers supported by non-UHB (no 6 GHz) devices, in the
 * order the NVM enumerates them; used when building the channel map.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
250
/*
 * Channel numbers for ultra-high-band capable devices; same 2.4/5 GHz
 * list as above with the 6-7 GHz channels appended.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
264
265 #define IWX_NUM_2GHZ_CHANNELS 14
266 #define IWX_NUM_5GHZ_CHANNELS 37
267
/*
 * Rate table mapping net80211 rates (in units of 500 kbit/s) to the
 * legacy PLCP value and the corresponding HT PLCP value used by the
 * firmware.  Entries with IWX_RATE_INVM_PLCP / *_MCS_INV_PLCP have no
 * equivalent in that modulation family.
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kbit/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP signal value */
	uint8_t ht_plcp;	/* HT MCS PLCP value */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
296 #define IWX_RIDX_CCK 0
297 #define IWX_RIDX_OFDM 4
298 #define IWX_RIDX_MAX (nitems(iwx_rates)-1)
299 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
300 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
301 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
302
/*
 * Convert an MCS index into an iwx_rates[] index.
 * Indexed by MCS number 0-15; the value is the position in iwx_rates[]
 * whose ht_plcp field carries that MCS.
 */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
322
323 static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
324 static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
325 static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
326 #if 0
327 static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
328 static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
329 #endif
330 static int iwx_apply_debug_destination(struct iwx_softc *);
331 static void iwx_set_ltr(struct iwx_softc *);
332 static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
333 static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
334 const struct iwx_fw_sects *);
335 static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
336 static void iwx_ctxt_info_free_paging(struct iwx_softc *);
337 static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
338 struct iwx_context_info_dram *);
339 static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
340 static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
341 const uint8_t *, size_t);
342 static int iwx_set_default_calib(struct iwx_softc *, const void *);
343 static void iwx_fw_info_free(struct iwx_fw_info *);
344 static int iwx_read_firmware(struct iwx_softc *);
345 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
346 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
347 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
348 static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
349 static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
351 static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
352 static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
353 static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
354 static int iwx_nic_lock(struct iwx_softc *);
355 static void iwx_nic_assert_locked(struct iwx_softc *);
356 static void iwx_nic_unlock(struct iwx_softc *);
357 static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
358 uint32_t);
359 static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
360 static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
361 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
362 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
363 bus_size_t, bus_size_t);
364 static void iwx_dma_contig_free(struct iwx_dma_info *);
365 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void iwx_disable_rx_dma(struct iwx_softc *);
367 static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
368 static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
369 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
370 static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
371 static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
372 static void iwx_enable_rfkill_int(struct iwx_softc *);
373 static int iwx_check_rfkill(struct iwx_softc *);
374 static void iwx_enable_interrupts(struct iwx_softc *);
375 static void iwx_enable_fwload_interrupt(struct iwx_softc *);
376 #if 0
377 static void iwx_restore_interrupts(struct iwx_softc *);
378 #endif
379 static void iwx_disable_interrupts(struct iwx_softc *);
380 static void iwx_ict_reset(struct iwx_softc *);
381 static int iwx_set_hw_ready(struct iwx_softc *);
382 static int iwx_prepare_card_hw(struct iwx_softc *);
383 static int iwx_force_power_gating(struct iwx_softc *);
384 static void iwx_apm_config(struct iwx_softc *);
385 static int iwx_apm_init(struct iwx_softc *);
386 static void iwx_apm_stop(struct iwx_softc *);
387 static int iwx_allow_mcast(struct iwx_softc *);
388 static void iwx_init_msix_hw(struct iwx_softc *);
389 static void iwx_conf_msix_hw(struct iwx_softc *, int);
390 static int iwx_clear_persistence_bit(struct iwx_softc *);
391 static int iwx_start_hw(struct iwx_softc *);
392 static void iwx_stop_device(struct iwx_softc *);
393 static void iwx_nic_config(struct iwx_softc *);
394 static int iwx_nic_rx_init(struct iwx_softc *);
395 static int iwx_nic_init(struct iwx_softc *);
396 static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
397 static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
398 static void iwx_post_alive(struct iwx_softc *);
399 static int iwx_schedule_session_protection(struct iwx_softc *,
400 struct iwx_node *, uint32_t);
401 static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
402 static void iwx_init_channel_map(struct ieee80211com *, int, int *,
403 struct ieee80211_channel[]);
404 static int iwx_mimo_enabled(struct iwx_softc *);
405 static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
406 uint16_t);
407 static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
408 static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
409 uint16_t, uint16_t, int, int);
410 static void iwx_sta_tx_agg_start(struct iwx_softc *,
411 struct ieee80211_node *, uint8_t);
412 static void iwx_ba_rx_task(void *, int);
413 static void iwx_ba_tx_task(void *, int);
414 static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
415 static int iwx_is_valid_mac_addr(const uint8_t *);
416 static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
417 static int iwx_nvm_get(struct iwx_softc *);
418 static int iwx_load_firmware(struct iwx_softc *);
419 static int iwx_start_fw(struct iwx_softc *);
420 static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
421 static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
422 static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
423 static int iwx_load_pnvm(struct iwx_softc *);
424 static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
425 static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
426 static int iwx_load_ucode_wait_alive(struct iwx_softc *);
427 static int iwx_send_dqa_cmd(struct iwx_softc *);
428 static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
429 static int iwx_config_ltr(struct iwx_softc *);
430 static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
431 static int iwx_rx_addbuf(struct iwx_softc *, int, int);
432 static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
433 static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
434 struct iwx_rx_data *);
435 static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
436 static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
437 #if 0
438 int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
439 struct ieee80211_node *, struct ieee80211_rxinfo *);
440 #endif
441 static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
442 int, int, uint32_t, uint8_t);
443 static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
444 static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
445 struct iwx_tx_data *);
446 static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
447 static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
448 struct iwx_rx_data *);
449 static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
450 static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
451 struct iwx_rx_data *);
452 static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
453 static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
454 static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
455 struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
456 #if 0
457 static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
458 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
459 #endif
460 static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
461 uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
462 static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
463 static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
464 const void *);
465 static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
466 uint32_t *);
467 static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
468 const void *, uint32_t *);
469 static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
470 static void iwx_cmd_done(struct iwx_softc *, int, int, int);
471 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
472 static uint32_t iwx_fw_rateidx_cck(uint8_t);
473 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
474 struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
475 struct mbuf *);
476 static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
477 uint16_t, uint16_t);
478 static int iwx_tx(struct iwx_softc *, struct mbuf *,
479 struct ieee80211_node *);
480 static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
481 static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
482 static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
483 static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
484 struct iwx_beacon_filter_cmd *);
485 static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
486 int);
487 static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
488 struct iwx_mac_power_cmd *);
489 static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
490 static int iwx_power_update_device(struct iwx_softc *);
491 #if 0
492 static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
493 #endif
494 static int iwx_disable_beacon_filter(struct iwx_softc *);
495 static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
496 static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
497 static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
498 static int iwx_fill_probe_req(struct iwx_softc *,
499 struct iwx_scan_probe_req *);
500 static int iwx_config_umac_scan_reduced(struct iwx_softc *);
501 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
502 static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
503 struct iwx_scan_general_params_v10 *, int);
504 static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
505 struct iwx_scan_general_params_v10 *, uint16_t, int);
506 static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
507 struct iwx_scan_channel_params_v6 *, uint32_t, int);
508 static int iwx_umac_scan_v14(struct iwx_softc *, int);
509 static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
510 static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
511 static int iwx_rval2ridx(int);
512 static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
513 int *);
514 static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
515 struct iwx_mac_ctx_cmd *, uint32_t);
516 static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
517 struct iwx_mac_data_sta *, int);
518 static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
519 uint32_t, int);
520 static int iwx_clear_statistics(struct iwx_softc *);
521 static int iwx_scan(struct iwx_softc *);
522 static int iwx_bgscan(struct ieee80211com *);
523 static int iwx_enable_mgmt_queue(struct iwx_softc *);
524 static int iwx_disable_mgmt_queue(struct iwx_softc *);
525 static int iwx_rs_rval2idx(uint8_t);
526 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
527 int);
528 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
529 static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
530 static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
531 static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
532 static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
533 uint8_t, uint8_t);
534 static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
535 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
536 uint8_t);
537 static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
538 static int iwx_deauth(struct iwx_softc *);
539 static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
540 static int iwx_run_stop(struct iwx_softc *);
541 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
542 const uint8_t[IEEE80211_ADDR_LEN]);
543 #if 0
544 int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
545 struct ieee80211_key *);
546 void iwx_setkey_task(void *);
547 void iwx_delete_key(struct ieee80211com *,
548 struct ieee80211_node *, struct ieee80211_key *);
549 #endif
550 static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
551 static void iwx_endscan(struct iwx_softc *);
552 static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
553 struct ieee80211_node *);
554 static int iwx_sf_config(struct iwx_softc *, int);
555 static int iwx_send_bt_init_conf(struct iwx_softc *);
556 static int iwx_send_soc_conf(struct iwx_softc *);
557 static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
558 static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
559 static int iwx_init_hw(struct iwx_softc *);
560 static int iwx_init(struct iwx_softc *);
561 static void iwx_stop(struct iwx_softc *);
562 static void iwx_watchdog(void *);
563 static const char *iwx_desc_lookup(uint32_t);
564 static void iwx_nic_error(struct iwx_softc *);
565 static void iwx_dump_driver_status(struct iwx_softc *);
566 static void iwx_nic_umac_error(struct iwx_softc *);
567 static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
568 static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
569 static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
570 struct mbuf *);
571 static void iwx_notif_intr(struct iwx_softc *);
572 #if 0
573 /* XXX-THJ - I don't have hardware for this */
574 static int iwx_intr(void *);
575 #endif
576 static void iwx_intr_msix(void *);
577 static int iwx_preinit(struct iwx_softc *);
578 static void iwx_attach_hook(void *);
579 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
580 static int iwx_probe(device_t);
581 static int iwx_attach(device_t);
582 static int iwx_detach(device_t);
583
584 /* FreeBSD specific glue */
/* All-ones Ethernet broadcast address (OpenBSD compat global). */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zeros "any" address used as a wildcard match. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
590
591 #if IWX_DEBUG
592 #define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
593 #else
594 #define DPRINTF(x) do { ; } while (0)
595 #endif
596
597 /* FreeBSD specific functions */
598 static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
599 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
600 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
601 static void iwx_vap_delete(struct ieee80211vap *);
602 static void iwx_parent(struct ieee80211com *);
603 static void iwx_scan_start(struct ieee80211com *);
604 static void iwx_scan_end(struct ieee80211com *);
605 static void iwx_update_mcast(struct ieee80211com *ic);
606 static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
607 static void iwx_scan_mindwell(struct ieee80211_scan_state *);
608 static void iwx_set_channel(struct ieee80211com *);
609 static void iwx_endscan_cb(void *, int );
610 static int iwx_wme_update(struct ieee80211com *);
611 static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
612 const struct ieee80211_bpf_params *);
613 static int iwx_transmit(struct ieee80211com *, struct mbuf *);
614 static void iwx_start(struct iwx_softc *);
615 static int iwx_ampdu_rx_start(struct ieee80211_node *,
616 struct ieee80211_rx_ampdu *, int, int, int);
617 static void iwx_ampdu_rx_stop(struct ieee80211_node *,
618 struct ieee80211_rx_ampdu *);
619 static int iwx_addba_request(struct ieee80211_node *,
620 struct ieee80211_tx_ampdu *, int, int, int);
621 static int iwx_addba_response(struct ieee80211_node *,
622 struct ieee80211_tx_ampdu *, int, int, int);
623 static void iwx_key_update_begin(struct ieee80211vap *);
624 static void iwx_key_update_end(struct ieee80211vap *);
625 static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
626 ieee80211_keyix *,ieee80211_keyix *);
627 static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
628 static int iwx_key_delete(struct ieee80211vap *,
629 const struct ieee80211_key *);
630 static int iwx_suspend(device_t);
631 static int iwx_resume(device_t);
632 static void iwx_radiotap_attach(struct iwx_softc *);
633
634 /* OpenBSD compat defines */
635 #define IEEE80211_HTOP0_SCO_SCN 0
636 #define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
637 #define IEEE80211_VHTOP0_CHAN_WIDTH_80 1
638
639 #define IEEE80211_HT_RATESET_SISO 0
640 #define IEEE80211_HT_RATESET_MIMO2 2
641
/*
 * Standard basic rate sets (rates in 500 kbit/s units) for the
 * 11a/11b/11g PHYs; OpenBSD compat tables used by the rate-selection
 * code.  First struct member is the number of rates that follow.
 */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
650
651 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)652 ieee80211_has_addr4(const struct ieee80211_frame *wh)
653 {
654 return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
655 IEEE80211_FC1_DIR_DSTODS;
656 }
657
658 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)659 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
660 {
661 const struct iwx_fw_cmd_version *entry;
662 int i;
663
664 for (i = 0; i < sc->n_cmd_versions; i++) {
665 entry = &sc->cmd_versions[i];
666 if (entry->group == grp && entry->cmd == cmd)
667 return entry->cmd_ver;
668 }
669
670 return IWX_FW_CMD_VER_UNKNOWN;
671 }
672
673 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)674 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
675 {
676 const struct iwx_fw_cmd_version *entry;
677 int i;
678
679 for (i = 0; i < sc->n_cmd_versions; i++) {
680 entry = &sc->cmd_versions[i];
681 if (entry->group == grp && entry->cmd == cmd)
682 return entry->notif_ver;
683 }
684
685 return IWX_FW_CMD_VER_UNKNOWN;
686 }
687
688 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)689 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
690 {
691 const struct iwx_fw_cscheme_list *l = (const void *)data;
692
693 if (dlen < sizeof(*l) ||
694 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
695 return EINVAL;
696
697 /* we don't actually store anything for now, always use s/w crypto */
698
699 return 0;
700 }
701
702 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)703 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
704 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
705 {
706 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
707 if (err) {
708 printf("%s: could not allocate context info DMA memory\n",
709 DEVNAME(sc));
710 return err;
711 }
712
713 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
714
715 return 0;
716 }
717
718 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)719 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
720 {
721 struct iwx_self_init_dram *dram = &sc->init_dram;
722 int i;
723
724 if (!dram->paging)
725 return;
726
727 /* free paging*/
728 for (i = 0; i < dram->paging_cnt; i++)
729 iwx_dma_contig_free(&dram->paging[i]);
730
731 free(dram->paging, M_DEVBUF);
732 dram->paging_cnt = 0;
733 dram->paging = NULL;
734 }
735
736 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)737 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
738 {
739 int i = 0;
740
741 while (start < fws->fw_count &&
742 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
743 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
744 start++;
745 i++;
746 }
747
748 return i;
749 }
750
/*
 * Copy all firmware sections (LMAC, UMAC, paging) into newly
 * allocated DMA memory and record their physical addresses in the
 * context-info DRAM structure handed to the device.
 *
 * Section layout in the fw image is
 *   [lmac...] <separator> [umac...] <separator> [paging...]
 * hence the +1/+2 index offsets below.
 *
 * fw image memory (dram->fw) is released by iwx_ctxt_info_free_fw_img()
 * once firmware is alive; paging memory only at device teardown via
 * iwx_ctxt_info_free_paging().  On error, callers are expected to free
 * whatever was allocated (they call iwx_ctxt_info_free_fw_img();
 * NOTE(review): verify that error paths also release dram->paging).
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the softc lock across the allocations below. */
	IWX_UNLOCK(sc);
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	/*
	 * NOTE(review): M_WAITOK allocations cannot return NULL, so the
	 * failure check below is dead code (harmless).
	 */
	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
848
/*
 * Format a firmware version triple into buf.
 *
 * Starting with major version 35 the Linux driver prints the minor
 * version in hexadecimal; match that so version strings compare
 * equal across drivers.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	if (major < 35) {
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
		return;
	}
	snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
}
#if 0
/*
 * Allocate a firmware monitor (debug) DMA buffer of the largest
 * power-of-two size in [2^min_power, 2^max_power] that the system can
 * provide.  Currently compiled out together with its only caller.
 *
 * NOTE(review): if the loop body never runs (max_power < min_power),
 * 'err' is read uninitialized below; and since 'power' is uint8_t, a
 * min_power of 0 would wrap the loop.  The sole caller passes
 * min_power == 11 and max_power in [12, 26], which avoids both.
 */
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	/* Already allocated on a previous attempt. */
	if (fw_mon->size)
		return 0;

	/* Try progressively smaller sizes until one succeeds. */
	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

/*
 * Translate the TLV-supplied size exponent into an absolute power-of-two
 * exponent (0 means "use the 64 MB maximum", otherwise the TLV value is
 * relative to 2 KB) and allocate the monitor buffer, capped at 2^26.
 */
static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif
925
/*
 * Program the firmware debug destination (monitor buffer plus a list of
 * register operations) from the version-1 debug-destination TLV.
 *
 * The real implementation is currently compiled out (#if 0), making
 * this a stub that always reports success; the fw monitor allocator it
 * depends on is disabled above as well.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Replay the register-op list supplied by the firmware file. */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			/* Stop early once the blocking bit is observed. */
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

	/* Reached by loop completion or via the PRPH_BLOCKBIT goto above. */
 monitor:
	/*
	 * NOTE(review): base_reg was already converted with le32toh()
	 * above, so this second le32toh() double-swaps it (a no-op only
	 * on little-endian hosts); end_reg is used directly.  Verify
	 * against the OpenBSD original before enabling this code.
	 */
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1012
1013 static void
iwx_set_ltr(struct iwx_softc * sc)1014 iwx_set_ltr(struct iwx_softc *sc)
1015 {
1016 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1017 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1018 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1019 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1020 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1021 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1022 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1023 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1024 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1025 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1026 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1027
1028 /*
1029 * To workaround hardware latency issues during the boot process,
1030 * initialize the LTR to ~250 usec (see ltr_val above).
1031 * The firmware initializes this again later (to a smaller value).
1032 */
1033 if (!sc->sc_integrated) {
1034 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1035 } else if (sc->sc_integrated &&
1036 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1037 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1038 IWX_HPM_MAC_LRT_ENABLE_ALL);
1039 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1040 }
1041 }
1042
/*
 * Build the (pre-AX210) context info structure in DMA memory, load all
 * firmware sections into DRAM, point the device at the context info
 * and kick the firmware self-load.
 *
 * Returns 0 on success; on failure the fw image DRAM is freed before
 * returning.  The context info itself is released later, upon the
 * "alive" notification or on failure to receive one.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/*
	 * The RB circular-buffer size must fit the 4-bit field in
	 * control_flags.  NOTE(review): the assertion message mentions
	 * "rate table size" but the check is about the RB CB size field.
	 */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1123
/*
 * Build the gen3 (AX210+) boot structures: the periphery scratch area,
 * the context info, and a DMA copy of the image loader (IML) shipped in
 * the firmware file; then point the device at them and kick the
 * firmware self-load.
 *
 * On failure, the IML DMA buffer and fw image DRAM are freed before
 * returning.  Remaining context info is released upon the "alive"
 * notification or on failure to receive one.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 devices cannot boot without the IML from the fw file. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill the context info with the addresses of the scratch area,
	 * the RX/TX rings, and the tail-index arrays which live at fixed
	 * offsets inside the prph_info page.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* 64-bit addresses are written as two 32-bit halves. */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1228
1229 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1230 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1231 {
1232 struct iwx_self_init_dram *dram = &sc->init_dram;
1233 int i;
1234
1235 if (!dram->fw)
1236 return;
1237
1238 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1239 iwx_dma_contig_free(&dram->fw[i]);
1240
1241 free(dram->fw, M_DEVBUF);
1242 dram->lmac_cnt = 0;
1243 dram->umac_cnt = 0;
1244 dram->fw = NULL;
1245 }
1246
1247 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1248 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1249 const uint8_t *data, size_t dlen)
1250 {
1251 struct iwx_fw_sects *fws;
1252 struct iwx_fw_onesect *fwone;
1253
1254 if (type >= IWX_UCODE_TYPE_MAX)
1255 return EINVAL;
1256 if (dlen < sizeof(uint32_t))
1257 return EINVAL;
1258
1259 fws = &sc->sc_fw.fw_sects[type];
1260 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1261 "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1262 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1263 return EINVAL;
1264
1265 fwone = &fws->fw_sect[fws->fw_count];
1266
1267 /* first 32bit are device load offset */
1268 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1269
1270 /* rest is data */
1271 fwone->fws_data = data + sizeof(uint32_t);
1272 fwone->fws_len = dlen - sizeof(uint32_t);
1273
1274 fws->fw_count++;
1275 fws->fw_totlen += fwone->fws_len;
1276
1277 return 0;
1278 }
1279
/* Scan-channel count assumed until the N_SCAN_CHANNELS TLV says otherwise. */
#define IWX_DEFAULT_SCAN_CHANNELS 40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */

/* Wire format of the DEF_CALIB TLV: ucode type plus its calibration ctrl. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1288
1289 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1290 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1291 {
1292 const struct iwx_tlv_calib_data *def_calib = data;
1293 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1294
1295 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1296 return EINVAL;
1297
1298 sc->sc_default_calib[ucode_type].flow_trigger =
1299 def_calib->calib.flow_trigger;
1300 sc->sc_default_calib[ucode_type].event_trigger =
1301 def_calib->calib.event_trigger;
1302
1303 return 0;
1304 }
1305
1306 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1307 iwx_fw_info_free(struct iwx_fw_info *fw)
1308 {
1309 free(fw->fw_rawdata, M_DEVBUF);
1310 fw->fw_rawdata = NULL;
1311 fw->fw_rawsize = 0;
1312 /* don't touch fw->fw_status */
1313 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1314 free(fw->iml, M_DEVBUF);
1315 fw->iml = NULL;
1316 fw->iml_len = 0;
1317 }
1318
/*
 * Mask of high address bits stripped (~mask) from firmware-supplied
 * debug-table pointers below; presumably cache-control bits set by the
 * firmware — TODO confirm against Intel/Linux driver sources.
 */
#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1320
/*
 * Load the firmware file via firmware(9) and parse its TLV stream into
 * sc->sc_fw: image sections, capability/API bitmaps, debug-destination
 * descriptors, the IML, and command versions.
 *
 * Idempotent: returns immediately once fw_status is DONE.  On any
 * parse error, everything already stored is released via
 * iwx_fw_info_free() and fw_status reverts to NONE.
 */
static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);


	/* Reset everything the TLVs below may (re)populate. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	/* TLV images start with a zero word followed by the magic. */
	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	/* Provisional version string; FW_VERSION TLV may overwrite it. */
	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	/* Walk the TLV stream; each record is 4-byte aligned. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug? Observations from
			 * current firmware file:
			 * 1) TLV_PAN is parsed first
			 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Only validated; the value itself is unused. */
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Set each advertised API bit in the bitmap. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			/*
			 * NOTE(review): unlike the API_CHANGES_SET case
			 * above, an out-of-range index jumps to parse_out
			 * without setting err, so parsing stops silently
			 * but the function still returns success.  Verify
			 * against OpenBSD/Linux whether this should be a
			 * "break" (skip for forward compatibility) or an
			 * EINVAL error.
			 */
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			/* Length check only; paging setup happens elsewhere. */
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			/* Overrides the header-derived version string. */
			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			/* Only version 0 of the debug-dest TLV is supported. */
			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			/* First debug-dest TLV wins; ignore duplicates. */
			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (const void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg);
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;

			/* Requires a prior debug-dest TLV; first conf per id wins. */
			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "Found debug configuration: %d\n", conf->id);
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			const struct iwx_umac_debug_addrs *dbg_ptrs =
			    (const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			/* Strip the cache-control bits from the fw address. */
			sc->sc_uc.uc_umac_error_event_table =
			    le32toh(dbg_ptrs->error_info_addr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			const struct iwx_lmac_debug_addrs *dbg_ptrs =
			    (const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
			    le32toh(dbg_ptrs->error_event_table_ptr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			/* A later IML TLV replaces any earlier copy. */
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF);
				fw->iml_len = 0;
			}
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAITOK | M_ZERO);
			/*
			 * NOTE(review): M_WAITOK cannot return NULL, so
			 * this check is dead code (harmless).
			 */
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			/*
			 * NOTE(review): rounding tlv_len down to a whole
			 * number of entries also shrinks the roundup()-based
			 * advance at the bottom of the loop; presumably
			 * inherited from OpenBSD — verify the trailing
			 * partial entry is always mere padding.
			 */
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
		case IWX_UCODE_TLV_FW_NUM_BEACONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
		case IWX_UCODE_TLV_CURRENT_PC:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
		case 0x101:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
		case 0x100000c:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
		case 69:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0, ("unhandled fw parse error"));

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	return err;
}
1761
1762 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1763 iwx_prph_addr_mask(struct iwx_softc *sc)
1764 {
1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1766 return 0x00ffffff;
1767 else
1768 return 0x000fffff;
1769 }
1770
1771 static uint32_t
iwx_read_prph_unlocked(struct iwx_softc * sc,uint32_t addr)1772 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1773 {
1774 uint32_t mask = iwx_prph_addr_mask(sc);
1775 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1776 IWX_BARRIER_READ_WRITE(sc);
1777 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1778 }
1779
/*
 * Read a periphery (PRPH) register.  The caller must already hold the
 * NIC access lock (see iwx_nic_lock()); this is asserted here.
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}
1786
1787 static void
iwx_write_prph_unlocked(struct iwx_softc * sc,uint32_t addr,uint32_t val)1788 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1789 {
1790 uint32_t mask = iwx_prph_addr_mask(sc);
1791 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1792 IWX_BARRIER_WRITE(sc);
1793 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1794 }
1795
/*
 * Write a periphery (PRPH) register.  The caller must already hold the
 * NIC access lock (see iwx_nic_lock()); this is asserted here.
 */
static void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}
1802
1803 static uint32_t
iwx_read_umac_prph(struct iwx_softc * sc,uint32_t addr)1804 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1805 {
1806 return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1807 }
1808
1809 static void
iwx_write_umac_prph(struct iwx_softc * sc,uint32_t addr,uint32_t val)1810 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1811 {
1812 iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1813 }
1814
1815 static int
iwx_read_mem(struct iwx_softc * sc,uint32_t addr,void * buf,int dwords)1816 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1817 {
1818 int offs, err = 0;
1819 uint32_t *vals = buf;
1820
1821 if (iwx_nic_lock(sc)) {
1822 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1823 for (offs = 0; offs < dwords; offs++)
1824 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1825 iwx_nic_unlock(sc);
1826 } else {
1827 err = EBUSY;
1828 }
1829 return err;
1830 }
1831
1832 static int
iwx_poll_bit(struct iwx_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)1833 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1834 int timo)
1835 {
1836 for (;;) {
1837 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1838 return 1;
1839 }
1840 if (timo < 10) {
1841 return 0;
1842 }
1843 timo -= 10;
1844 DELAY(10);
1845 }
1846 }
1847
/*
 * Acquire MAC access so that registers beyond the always-available CSR
 * range may be used.  The lock nests: sc_nic_locks counts holders and
 * only the first acquisition touches the hardware.  Returns 1 on
 * success, 0 on failure (after logging).
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Ask the device to wake up and keep its MAC clock running. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	/*
	 * Wait (up to 150ms) for the MAC clock to come up; the device
	 * must not be on its way to sleep while we take access.
	 */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	    | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1873
1874 static void
iwx_nic_assert_locked(struct iwx_softc * sc)1875 iwx_nic_assert_locked(struct iwx_softc *sc)
1876 {
1877 if (sc->sc_nic_locks <= 0)
1878 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1879 }
1880
1881 static void
iwx_nic_unlock(struct iwx_softc * sc)1882 iwx_nic_unlock(struct iwx_softc *sc)
1883 {
1884 if (sc->sc_nic_locks > 0) {
1885 if (--sc->sc_nic_locks == 0)
1886 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1887 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1888 } else
1889 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1890 }
1891
1892 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1894 uint32_t mask)
1895 {
1896 uint32_t val;
1897
1898 if (iwx_nic_lock(sc)) {
1899 val = iwx_read_prph(sc, reg) & mask;
1900 val |= bits;
1901 iwx_write_prph(sc, reg, val);
1902 iwx_nic_unlock(sc);
1903 return 0;
1904 }
1905 return EBUSY;
1906 }
1907
1908 static int
iwx_set_bits_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits)1909 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1910 {
1911 return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1912 }
1913
1914 static int
iwx_clear_bits_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits)1915 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1916 {
1917 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1918 }
1919
1920 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1922 {
1923 if (error != 0)
1924 return;
1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1926 *(bus_addr_t *)arg = segs[0].ds_addr;
1927 }
1928
/*
 * Allocate 'size' bytes of physically contiguous, zeroed, coherent DMA
 * memory with the given alignment and load it into a map.  On success
 * dma->vaddr/paddr are valid and the map is pre-synced for writing.
 * On failure every partially acquired resource is released and an
 * errno value is returned.
 */
static int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* One segment only: the hardware expects contiguous buffers. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Free the memory here; the tag is torn down below. */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	/* iwx_dma_contig_free() copes with partially initialized state. */
	iwx_dma_contig_free(dma);
	return error;
}
1967
/*
 * Release DMA memory obtained via iwx_dma_contig_alloc().  Safe to
 * call on a partially initialized or already freed iwx_dma_info:
 * each step is guarded and NULLs its pointer afterwards.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1983
1984 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1985 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986 {
1987 bus_size_t size;
1988 int i, err;
1989
1990 ring->cur = 0;
1991
1992 /* Allocate RX descriptors (256-byte aligned). */
1993 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1994 size = sizeof(struct iwx_rx_transfer_desc);
1995 else
1996 size = sizeof(uint64_t);
1997 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1998 size * IWX_RX_MQ_RING_COUNT, 256);
1999 if (err) {
2000 device_printf(sc->sc_dev,
2001 "could not allocate RX ring DMA memory\n");
2002 goto fail;
2003 }
2004 ring->desc = ring->free_desc_dma.vaddr;
2005
2006 /* Allocate RX status area (16-byte aligned). */
2007 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2008 size = sizeof(uint16_t);
2009 else
2010 size = sizeof(*ring->stat);
2011 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2012 if (err) {
2013 device_printf(sc->sc_dev,
2014 "could not allocate RX status DMA memory\n");
2015 goto fail;
2016 }
2017 ring->stat = ring->stat_dma.vaddr;
2018
2019 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2020 size = sizeof(struct iwx_rx_completion_desc);
2021 else
2022 size = sizeof(uint32_t);
2023 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2024 size * IWX_RX_MQ_RING_COUNT, 256);
2025 if (err) {
2026 device_printf(sc->sc_dev,
2027 "could not allocate RX ring DMA memory\n");
2028 goto fail;
2029 }
2030
2031 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2032 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2033 0, NULL, NULL, &ring->data_dmat);
2034
2035 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2036 struct iwx_rx_data *data = &ring->data[i];
2037
2038 memset(data, 0, sizeof(*data));
2039 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2040 if (err) {
2041 device_printf(sc->sc_dev,
2042 "could not create RX buf DMA map\n");
2043 goto fail;
2044 }
2045
2046 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2047 if (err)
2048 goto fail;
2049 }
2050 return 0;
2051
2052 fail: iwx_free_rx_ring(sc, ring);
2053 return err;
2054 }
2055
/*
 * Quiesce the RX free-handler (RFH) DMA engine and poll (up to ~10ms)
 * for it to report idle.  AX210+ devices use the GEN3 register set at
 * the UMAC offset.  Silently does nothing if the NIC lock cannot be
 * taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		/* Writing 0 to the DMA config register stops the engine. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* Poll the status register until the engine is idle. */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
2081
2082 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2083 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2084 {
2085 ring->cur = 0;
2086 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2087 BUS_DMASYNC_PREWRITE);
2088 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2089 uint16_t *status = sc->rxq.stat_dma.vaddr;
2090 *status = 0;
2091 } else
2092 memset(ring->stat, 0, sizeof(*ring->stat));
2093 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2094 BUS_DMASYNC_POSTWRITE);
2095
2096 }
2097
2098 static void
iwx_free_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2099 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2100 {
2101 int i;
2102
2103 iwx_dma_contig_free(&ring->free_desc_dma);
2104 iwx_dma_contig_free(&ring->stat_dma);
2105 iwx_dma_contig_free(&ring->used_desc_dma);
2106
2107 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2108 struct iwx_rx_data *data = &ring->data[i];
2109 if (data->m != NULL) {
2110 bus_dmamap_sync(ring->data_dmat, data->map,
2111 BUS_DMASYNC_POSTREAD);
2112 bus_dmamap_unload(ring->data_dmat, data->map);
2113 m_freem(data->m);
2114 data->m = NULL;
2115 }
2116 if (data->map != NULL) {
2117 bus_dmamap_destroy(ring->data_dmat, data->map);
2118 data->map = NULL;
2119 }
2120 }
2121 if (ring->data_dmat != NULL) {
2122 bus_dma_tag_destroy(ring->data_dmat);
2123 ring->data_dmat = NULL;
2124 }
2125 }
2126
2127 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2128 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2129 {
2130 bus_addr_t paddr;
2131 bus_size_t size;
2132 int i, err;
2133 size_t bc_tbl_size;
2134 bus_size_t bc_align;
2135 size_t mapsize;
2136
2137 ring->qid = qid;
2138 ring->queued = 0;
2139 ring->cur = 0;
2140 ring->cur_hw = 0;
2141 ring->tail = 0;
2142 ring->tail_hw = 0;
2143
2144 /* Allocate TX descriptors (256-byte aligned). */
2145 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2146 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2147 if (err) {
2148 device_printf(sc->sc_dev,
2149 "could not allocate TX ring DMA memory\n");
2150 goto fail;
2151 }
2152 ring->desc = ring->desc_dma.vaddr;
2153
2154 /*
2155 * The hardware supports up to 512 Tx rings which is more
2156 * than we currently need.
2157 *
2158 * In DQA mode we use 1 command queue + 1 default queue for
2159 * management, control, and non-QoS data frames.
2160 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2161 *
2162 * Tx aggregation requires additional queues, one queue per TID for
2163 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2164 * Firmware may assign its own internal IDs for these queues
2165 * depending on which TID gets aggregation enabled first.
2166 * The driver maintains a table mapping driver-side queue IDs
2167 * to firmware-side queue IDs.
2168 */
2169
2170 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2171 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2172 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2173 bc_align = 128;
2174 } else {
2175 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2176 bc_align = 64;
2177 }
2178 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2179 bc_align);
2180 if (err) {
2181 device_printf(sc->sc_dev,
2182 "could not allocate byte count table DMA memory\n");
2183 goto fail;
2184 }
2185
2186 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2187 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2188 IWX_FIRST_TB_SIZE_ALIGN);
2189 if (err) {
2190 device_printf(sc->sc_dev,
2191 "could not allocate cmd DMA memory\n");
2192 goto fail;
2193 }
2194 ring->cmd = ring->cmd_dma.vaddr;
2195
2196 /* FW commands may require more mapped space than packets. */
2197 if (qid == IWX_DQA_CMD_QUEUE)
2198 mapsize = (sizeof(struct iwx_cmd_header) +
2199 IWX_MAX_CMD_PAYLOAD_SIZE);
2200 else
2201 mapsize = MCLBYTES;
2202 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2203 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2204 mapsize, 0, NULL, NULL, &ring->data_dmat);
2205
2206 paddr = ring->cmd_dma.paddr;
2207 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2208 struct iwx_tx_data *data = &ring->data[i];
2209
2210 data->cmd_paddr = paddr;
2211 paddr += sizeof(struct iwx_device_cmd);
2212
2213 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2214 if (err) {
2215 device_printf(sc->sc_dev,
2216 "could not create TX buf DMA map\n");
2217 goto fail;
2218 }
2219 }
2220 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2221 return 0;
2222
2223 fail:
2224 return err;
2225 }
2226
2227 static void
iwx_reset_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2228 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2229 {
2230 int i;
2231
2232 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2233 struct iwx_tx_data *data = &ring->data[i];
2234
2235 if (data->m != NULL) {
2236 bus_dmamap_sync(ring->data_dmat, data->map,
2237 BUS_DMASYNC_POSTWRITE);
2238 bus_dmamap_unload(ring->data_dmat, data->map);
2239 m_freem(data->m);
2240 data->m = NULL;
2241 }
2242 }
2243
2244 /* Clear byte count table. */
2245 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2246
2247 /* Clear TX descriptors. */
2248 memset(ring->desc, 0, ring->desc_dma.size);
2249 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2250 BUS_DMASYNC_PREWRITE);
2251 sc->qfullmsk &= ~(1 << ring->qid);
2252 sc->qenablemsk &= ~(1 << ring->qid);
2253 for (i = 0; i < nitems(sc->aggqid); i++) {
2254 if (sc->aggqid[i] == ring->qid) {
2255 sc->aggqid[i] = 0;
2256 break;
2257 }
2258 }
2259 ring->queued = 0;
2260 ring->cur = 0;
2261 ring->cur_hw = 0;
2262 ring->tail = 0;
2263 ring->tail_hw = 0;
2264 ring->tid = 0;
2265 }
2266
2267 static void
iwx_free_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2268 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2269 {
2270 int i;
2271
2272 iwx_dma_contig_free(&ring->desc_dma);
2273 iwx_dma_contig_free(&ring->cmd_dma);
2274 iwx_dma_contig_free(&ring->bc_tbl);
2275
2276 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2277 struct iwx_tx_data *data = &ring->data[i];
2278
2279 if (data->m != NULL) {
2280 bus_dmamap_sync(ring->data_dmat, data->map,
2281 BUS_DMASYNC_POSTWRITE);
2282 bus_dmamap_unload(ring->data_dmat, data->map);
2283 m_freem(data->m);
2284 data->m = NULL;
2285 }
2286 if (data->map != NULL) {
2287 bus_dmamap_destroy(ring->data_dmat, data->map);
2288 data->map = NULL;
2289 }
2290 }
2291 if (ring->data_dmat != NULL) {
2292 bus_dma_tag_destroy(ring->data_dmat);
2293 ring->data_dmat = NULL;
2294 }
2295 }
2296
2297 static void
iwx_enable_rfkill_int(struct iwx_softc * sc)2298 iwx_enable_rfkill_int(struct iwx_softc *sc)
2299 {
2300 if (!sc->sc_msix) {
2301 sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2302 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2303 } else {
2304 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2305 sc->sc_fh_init_mask);
2306 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2307 ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2308 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2309 }
2310
2311 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2312 IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2313 }
2314
2315 static int
iwx_check_rfkill(struct iwx_softc * sc)2316 iwx_check_rfkill(struct iwx_softc *sc)
2317 {
2318 uint32_t v;
2319 int rv;
2320
2321 /*
2322 * "documentation" is not really helpful here:
2323 * 27: HW_RF_KILL_SW
2324 * Indicates state of (platform's) hardware RF-Kill switch
2325 *
2326 * But apparently when it's off, it's on ...
2327 */
2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2330 if (rv) {
2331 sc->sc_flags |= IWX_FLAG_RFKILL;
2332 } else {
2333 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2334 }
2335
2336 return rv;
2337 }
2338
2339 static void
iwx_enable_interrupts(struct iwx_softc * sc)2340 iwx_enable_interrupts(struct iwx_softc *sc)
2341 {
2342 if (!sc->sc_msix) {
2343 sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2344 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2345 } else {
2346 /*
2347 * fh/hw_mask keeps all the unmasked causes.
2348 * Unlike msi, in msix cause is enabled when it is unset.
2349 */
2350 sc->sc_hw_mask = sc->sc_hw_init_mask;
2351 sc->sc_fh_mask = sc->sc_fh_init_mask;
2352 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2353 ~sc->sc_fh_mask);
2354 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2355 ~sc->sc_hw_mask);
2356 }
2357 }
2358
2359 static void
iwx_enable_fwload_interrupt(struct iwx_softc * sc)2360 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2361 {
2362 if (!sc->sc_msix) {
2363 sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2364 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2365 } else {
2366 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2367 ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2368 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2369 /*
2370 * Leave all the FH causes enabled to get the ALIVE
2371 * notification.
2372 */
2373 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2374 ~sc->sc_fh_init_mask);
2375 sc->sc_fh_mask = sc->sc_fh_init_mask;
2376 }
2377 }
2378
#if 0
/* Restore a previously saved MSI interrupt mask (currently unused). */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2386
2387 static void
iwx_disable_interrupts(struct iwx_softc * sc)2388 iwx_disable_interrupts(struct iwx_softc *sc)
2389 {
2390 if (!sc->sc_msix) {
2391 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2392
2393 /* acknowledge all interrupts */
2394 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2395 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2396 } else {
2397 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2398 sc->sc_fh_init_mask);
2399 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2400 sc->sc_hw_init_mask);
2401 }
2402 }
2403
/*
 * Reset the interrupt cause table (ICT), a DMA area the device writes
 * interrupt causes into, point the hardware at it, and switch the
 * driver into ICT interrupt mode.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	/* Start with an empty table and the read index at slot 0. */
	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Acknowledge anything pending, then re-enable interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2425
#define IWX_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll (up to IWX_HW_READY_TIMEOUT microseconds)
 * for the device to reflect it back.  Returns non-zero when the
 * device is ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	/* Tell the device the OS is alive and driving it. */
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2447
/*
 * Bring the NIC out of low power by repeatedly asserting PREPARE and
 * polling for readiness.  Returns 0 once ready, ETIMEDOUT otherwise.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	/* Fast path: the device may already be awake. */
	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		/*
		 * NOTE(review): 't' accumulates across outer attempts
		 * rather than resetting per attempt; this matches the
		 * upstream OpenBSD code and is kept as-is.
		 */
		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2477
/*
 * Power-gating workaround (called for 22000-family integrated parts
 * from iwx_start_hw()): force the chip active, enable power gating
 * and sleep, then release the force.  The DELAY(20) settle times sit
 * between the register updates.  Returns 0 or an errno from the PRPH
 * accessors (EBUSY if the NIC lock could not be taken).
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2498
2499 static void
iwx_apm_config(struct iwx_softc * sc)2500 iwx_apm_config(struct iwx_softc *sc)
2501 {
2502 uint16_t lctl, cap;
2503 int pcie_ptr;
2504 int error;
2505
2506 /*
2507 * L0S states have been found to be unstable with our devices
2508 * and in newer hardware they are not officially supported at
2509 * all, so we must always set the L0S_DISABLED bit.
2510 */
2511 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2512
2513 error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
2514 if (error != 0) {
2515 printf("can't fill pcie_ptr\n");
2516 return;
2517 }
2518
2519 lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
2520 sizeof(lctl));
2521 #define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
2522 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2523 #define PCI_PCIE_DCSR2 0x28
2524 cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
2525 sizeof(lctl));
2526 #define PCI_PCIE_DCSR2_LTREN 0x00000400
2527 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2528 #define PCI_PCIE_LCSR_ASPM_L1 0x00000002
2529 DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2530 DEVNAME(sc),
2531 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2532 sc->sc_ltr_enabled ? "En" : "Dis"));
2533 #undef PCI_PCIE_LCSR_ASPM_L0S
2534 #undef PCI_PCIE_DCSR2
2535 #undef PCI_PCIE_DCSR2_LTREN
2536 #undef PCI_PCIE_LCSR_ASPM_L1
2537 }
2538
2539 /*
2540 * Start up NIC's basic functionality after it has been reset
2541 * e.g. after platform boot or shutdown.
2542 * NOTE: This does not load uCode nor start the embedded processor
2543 */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/* Record platform ASPM/LTR state and disable L0s device-side. */
	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2592
/*
 * Power the adapter down: assert PREPARE/ENABLE_PME, stop busmaster
 * DMA, and clear INIT_DONE to drop back from D0A to D0U state.
 * Roughly the inverse of iwx_apm_init().
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for bus master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2621
/*
 * Program the MSI-X hardware configuration and snapshot the resulting
 * "init" cause masks, which later serve as the baseline for
 * enabling/disabling interrupts.
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* The mask registers hold masked causes; invert for enabled set. */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2635
/*
 * Configure the device's interrupt vector tables.  In MSI mode this
 * just forces the (MSI-X-by-default) hardware into MSI operation.
 * In MSI-X mode every RX queue and every non-RX cause is mapped to
 * vector 0, then the corresponding causes are unmasked.  'stopped'
 * skips the PRPH write when the device is not awake.
 */
static void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
		    IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2723
/*
 * Clear the persistence bit in HPM_DEBUG so that a following reset
 * fully resets the device.  A read of 0xa5a5a5a0 is treated as a bogus
 * register value and skipped.  Returns 0, or EPERM if WFPM write
 * protection prevents clearing the bit.
 */
static int
iwx_clear_persistence_bit(struct iwx_softc *sc)
{
	uint32_t hpm, wprot;

	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
		/* The bit is only writable while write protection is off. */
		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
		if (wprot & IWX_PREG_WFPM_ACCESS) {
			printf("%s: cannot clear persistence bit\n",
			    DEVNAME(sc));
			return EPERM;
		}
		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
		    hpm & ~IWX_PERSISTENCE_BIT);
	}

	return 0;
}
2743
/*
 * Full hardware bring-up: wake the card, clear the persistence bit on
 * 22000-family devices, software-reset it (twice on integrated 22000
 * parts, around the power-gating workaround), initialize APM and
 * MSI-X, and arm the RF-kill interrupt.  Returns 0 or an errno.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		/* Bring the MAC clock up before forcing power gating. */
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	/* Watch for RF-kill changes; record the current switch state. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2796
static void
/*
 * Stop the device: disable interrupts, tear down RX/TX DMA rings,
 * stop the APM block, reset the on-board processor, and put the
 * hardware into a low-power state.  The MSI-X IVAR table is
 * re-programmed afterwards so the RF-kill interrupt keeps working
 * while the device is down.
 */
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging and PNVM DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2857
static void
/*
 * Program the HW interface configuration register with the MAC
 * step/dash (from the hardware revision) and the radio type/step/dash
 * (from the firmware PHY configuration).  Pre-AX210 devices only;
 * see caller iwx_nic_init().
 */
iwx_nic_config(struct iwx_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	/* Extract radio config fields from the FW PHY configuration. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
	    IWX_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC step/dash derived from the hardware revision register. */
	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	/* Only replace the fields we own; preserve everything else. */
	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
}
2894
static int
/*
 * Minimal RX-side hardware init: only the interrupt coalescing timer
 * is programmed here.  Returns 0 always.
 */
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2906
2907 static int
iwx_nic_init(struct iwx_softc * sc)2908 iwx_nic_init(struct iwx_softc *sc)
2909 {
2910 int err;
2911
2912 iwx_apm_init(sc);
2913 if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2914 iwx_nic_config(sc);
2915
2916 err = iwx_nic_rx_init(sc);
2917 if (err)
2918 return err;
2919
2920 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2921
2922 return 0;
2923 }
2924
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
/* Index order follows the EDCA access categories: BE, BK, VI, VO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2932
static int
/*
 * Enable a firmware Tx queue via SCD_QUEUE_CFG.  Two command formats
 * are supported depending on the firmware command version: the legacy
 * v0 layout and the v3 add/remove layout.  The firmware replies with
 * the queue it actually assigned; since this driver does not support
 * dynamic queue IDs, a mismatch is treated as an error.
 *
 * Returns 0 on success, ENOTSUP for unknown command versions, or EIO
 * on firmware-reported failure / response mismatch.
 */
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy layout: DMA addresses passed directly. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 layout: explicit ADD operation with a station mask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our (freshly reset) ring. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3025
static int
/*
 * Disable a firmware Tx queue — the inverse of iwx_enable_txq().
 * Uses the legacy v0 layout (all-zero config with the "enabled" flag
 * cleared) or the v3 REMOVE operation depending on the firmware
 * command version.  On success the queue is removed from qenablemsk
 * and the local ring is reset.
 *
 * Returns 0 on success, ENOTSUP for unknown command versions, or EIO
 * on firmware-reported failure.
 */
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3084
3085 static void
iwx_post_alive(struct iwx_softc * sc)3086 iwx_post_alive(struct iwx_softc *sc)
3087 {
3088 int txcmd_ver;
3089
3090 iwx_ict_reset(sc);
3091
3092 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3093 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3094 sc->sc_rate_n_flags_version = 2;
3095 else
3096 sc->sc_rate_n_flags_version = 1;
3097
3098 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3099 }
3100
static int
/*
 * Ask firmware to protect the medium for 'duration_tu' time units
 * (used around association).  Sets IWX_FLAG_TE_ACTIVE on success so
 * iwx_unprotect_session() knows a time event is outstanding.
 *
 * Returns 0 on success or an errno from the command submission.
 */
iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
    uint32_t duration_tu)
{

	struct iwx_session_prot_cmd cmd = {
		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
		    in->in_color)),
		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = htole32(duration_tu),
	};
	uint32_t cmd_id;
	int err;

	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (!err)
		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
	return err;
}
3122
static void
/*
 * Cancel an outstanding session-protection time event, if any.
 * Counterpart to iwx_schedule_session_protection(); clears
 * IWX_FLAG_TE_ACTIVE once firmware acknowledges the removal.
 */
iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_session_prot_cmd cmd = {
		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
		    in->in_color)),
		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = 0,
	};
	uint32_t cmd_id;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
		return;

	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
}
3143
3144 /*
3145 * NVM read access and content parsing. We do not support
3146 * external NVM or writing NVM.
3147 */
3148
3149 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3150 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3151 {
3152 uint8_t tx_ant;
3153
3154 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3155 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3156
3157 if (sc->sc_nvm.valid_tx_ant)
3158 tx_ant &= sc->sc_nvm.valid_tx_ant;
3159
3160 return tx_ant;
3161 }
3162
3163 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3164 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3165 {
3166 uint8_t rx_ant;
3167
3168 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3169 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3170
3171 if (sc->sc_nvm.valid_rx_ant)
3172 rx_ant &= sc->sc_nvm.valid_rx_ant;
3173
3174 return rx_ant;
3175 }
3176
static void
/*
 * net80211 ic_getradiocaps callback: populate the channel list from
 * the NVM channel profile returned by firmware (NVM_GET_INFO).
 * Channel flags come from either the v3 (16-bit) or v4 (32-bit)
 * regulatory response layout, selected by sc_rsp_vers.
 * 2.4GHz and 5GHz bands are supported; 6GHz is not yet implemented.
 */
iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];
	const uint8_t *nvm_channels;
	uint32_t ch_flags;
	int ch_idx, nchan;

	/* UHB (ultra-high-band) devices use a larger channel table. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	/* 2.4Ghz; 1-13: 11b/g channels. */
	if (!data->sku_cap_band_24GHz_enable)
		goto band_5;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	for (ch_idx = 0;
	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
	    ch_idx++) {

		uint32_t nflags = 0;
		int cflags = 0;

		/* v4 responses carry 32-bit channel flags, v3 16-bit. */
		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		} else {
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
		}
		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}

band_5:
	/* 5Ghz */
	if (!data->sku_cap_band_52GHz_enable)
		goto band_6;


	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11A);
	setbit(bands, IEEE80211_MODE_11NA);
	setbit(bands, IEEE80211_MODE_VHT_5GHZ);

	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
	    ch_idx++) {
		uint32_t nflags = 0;
		int cflags = 0;

		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		else
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);

		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		/* Wider channel widths are advertised per-channel by NVM. */
		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;
		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT80;
		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT160;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}
band_6:
	/* 6GHz one day ... */
	return;
}
3279
3280 static int
iwx_mimo_enabled(struct iwx_softc * sc)3281 iwx_mimo_enabled(struct iwx_softc *sc)
3282 {
3283
3284 return !sc->sc_nvm.sku_cap_mimo_disable;
3285 }
3286
3287 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3289 uint16_t ssn, uint16_t buf_size)
3290 {
3291 reorder_buf->head_sn = ssn;
3292 reorder_buf->num_stored = 0;
3293 reorder_buf->buf_size = buf_size;
3294 reorder_buf->last_amsdu = 0;
3295 reorder_buf->last_sub_index = 0;
3296 reorder_buf->removed = 0;
3297 reorder_buf->valid = 0;
3298 reorder_buf->consec_oldsn_drops = 0;
3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3300 reorder_buf->consec_oldsn_prev_drop = 0;
3301 }
3302
3303 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3305 {
3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3307
3308 reorder_buf->removed = 1;
3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3310 }
3311
3312 #define IWX_MAX_RX_BA_SESSIONS 16
3313
3314 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3316 {
3317 int i;
3318
3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3320 if (sc->sc_rxba_data[i].baid ==
3321 IWX_RX_REORDER_DATA_INVALID_BAID)
3322 continue;
3323 if (sc->sc_rxba_data[i].tid == tid)
3324 return &sc->sc_rxba_data[i];
3325 }
3326
3327 return NULL;
3328 }
3329
static int
/*
 * Configure (start) or tear down (stop) an Rx block-ack session via
 * the RX_BAID_ALLOCATION_CONFIG command.  On start, firmware assigns
 * a BAID which is returned through *baid; on stop, *baid is set to
 * the BAID of the existing session for 'tid'.
 *
 * Returns 0 on success, ENOENT if stopping a nonexistent session,
 * ERANGE if firmware returned an out-of-range BAID, or an errno from
 * command submission.
 */
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* v1 removes by BAID; later versions by station mask + TID. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* Firmware-assigned BAID must index sc_rxba_data[]. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3380
static void
/*
 * Start or stop an Rx block-ack (AMPDU) session for a TID and update
 * the local session bookkeeping in sc_rxba_data[].  Deaggregation and
 * reordering are handled by hardware/firmware; this only tracks state.
 * Silently returns if the per-device session limit is reached.
 */
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	/* Only ML-BAID capable firmware is supported on this path;
	 * err is always assigned because the else branch panics. */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		/* Refuse to clobber an already-active session slot. */
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		if (timeout_val != 0) {
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3431
static void
/*
 * Begin Tx aggregation for a TID by enabling a dedicated Tx queue.
 * Firmware handles the actual BA session negotiation; the driver just
 * provides the queue and records the TID→queue mapping in aggqid[].
 */
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid)
{
	int err, qid;

	qid = sc->aggqid[tid];
	if (qid == 0) {
		/* Firmware should pick the next unused Tx queue. */
		qid = fls(sc->qenablemsk);
	}

	DPRINTF(("%s: qid=%i\n", __func__, qid));

	/*
	 * Simply enable the queue.
	 * Firmware handles Tx Ba session setup and teardown.
	 */
	if ((sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwx_nic_lock(sc)) {
			return;
		}
		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
		    IWX_TX_RING_COUNT);
		iwx_nic_unlock(sc);
		if (err) {
			printf("%s: could not enable Tx queue %d "
			    "(error %d)\n", DEVNAME(sc), qid, err);
			return;
		}
	}
	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
	sc->aggqid[tid] = qid;
}
3467
static void
/*
 * Deferred task: process pending Rx block-ack start/stop requests
 * recorded in ba_rx.start_tidmask / ba_rx.stop_tidmask, one TID at a
 * time, under the softc lock.  Bails out early on shutdown.
 */
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			/* Skip TIDs whose session was already set up. */
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3504
static void
/*
 * Deferred task: process pending Tx block-ack start requests recorded
 * in ba_tx.start_tidmask under the softc lock, and flag that AMPDU Tx
 * is in use.  Bails out early on shutdown.
 */
iwx_ba_tx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_tx.start_tidmask & (1 << tid)) {
			DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
			    tid));
			iwx_sta_tx_agg_start(sc, ni, tid);
			sc->ba_tx.start_tidmask &= ~(1 << tid);
			sc->sc_flags |= IWX_FLAG_AMPDUTX;
		}
	}

	IWX_UNLOCK(sc);
}
3529
static void
/*
 * Read the device MAC address from CSR registers into data->hw_addr.
 * Prefers the OEM-fused "strap" address; falls back to the OTP copy
 * if the strap address is not valid.  Leaves hw_addr zeroed if the
 * NIC cannot be locked.
 */
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	uint32_t mac_addr0, mac_addr1;

	memset(data->hw_addr, 0, sizeof(data->hw_addr));

	if (!iwx_nic_lock(sc))
		return;

	/* NOTE: htole32() around a register read mirrors the OpenBSD
	 * original; the bytes are reordered by iwx_flip_hw_address(). */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	/* If OEM fused a valid address, use it instead of the one in OTP. */
	if (iwx_is_valid_mac_addr(data->hw_addr)) {
		iwx_nic_unlock(sc);
		return;
	}

	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	iwx_nic_unlock(sc);
}
3558
3559 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3560 iwx_is_valid_mac_addr(const uint8_t *addr)
3561 {
3562 static const uint8_t reserved_mac[] = {
3563 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3564 };
3565
3566 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3567 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3568 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3569 !ETHER_IS_MULTICAST(addr));
3570 }
3571
static void
/*
 * Assemble a 6-byte MAC address from two 32-bit register words.
 * dest[0..3] are the bytes of mac_addr0 in reverse memory order;
 * dest[4..5] are the first two bytes of mac_addr1, swapped.
 */
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	const uint8_t *p;
	int i;

	/* First four octets: mac_addr0, byte-reversed. */
	p = (const uint8_t *)&mac_addr0;
	for (i = 0; i < 4; i++)
		dest[i] = p[3 - i];

	/* Last two octets: low half of mac_addr1, swapped. */
	p = (const uint8_t *)&mac_addr1;
	dest[4] = p[1];
	dest[5] = p[0];
}
3587
static int
/*
 * Fetch NVM contents from firmware via the NVM_GET_INFO command and
 * populate sc->sc_nvm (version, MAC/PHY SKU capabilities, antenna
 * masks, LAR) plus the device MAC address from CSR registers.  The
 * raw response is stashed in sc_rsp_info for later channel-map
 * parsing (see iwx_init_channel_map()).
 *
 * Returns 0 on success or an errno (EIO on short response, EINVAL if
 * no valid MAC address was found).
 */
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err = 0;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory. So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err) {
		printf("%s: failed to send cmd (error %d)", __func__, err);
		return err;
	}

	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
		printf("%s: resp_len=%zu\n", __func__, resp_len);
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* The MAC address lives in CSR registers, not in the response. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	/* Safe for v3 responses too, per the layout note above. */
	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* LAR needs both the NVM flag and the firmware capability. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* Keep the raw response for channel-map parsing later. */
	memcpy(&sc->sc_rsp_info, rsp, resp_len);
	if (v4) {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
	} else {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3677
static int
/*
 * Load the regular-runtime firmware image into the device via the
 * context-info mechanism (gen3 for AX210+, gen2 otherwise), then
 * sleep until the ALIVE interrupt fires or a one-second timeout
 * elapses.  Transient load resources (IML, fw image DMA) are freed
 * in all cases.
 *
 * Returns 0 on success, EINVAL if firmware never reported OK, or the
 * msleep/context-info errno.
 */
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	/* Cleared here; set by the interrupt handler on ALIVE. */
	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The image and IML are no longer needed once the load finished. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3714
static int
/*
 * Prepare interrupt state and NIC core blocks, then kick off the
 * firmware load.  Returns 0 on success or an errno from NIC init or
 * firmware loading.
 */
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Ack any stale interrupts before reconfiguring. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3742
3743 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3744 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3745 size_t len)
3746 {
3747 const struct iwx_ucode_tlv *tlv;
3748 uint32_t sha1 = 0;
3749 uint16_t mac_type = 0, rf_id = 0;
3750 uint8_t *pnvm_data = NULL, *tmp;
3751 int hw_match = 0;
3752 uint32_t size = 0;
3753 int err;
3754
3755 while (len >= sizeof(*tlv)) {
3756 uint32_t tlv_len, tlv_type;
3757
3758 len -= sizeof(*tlv);
3759 tlv = (const void *)data;
3760
3761 tlv_len = le32toh(tlv->length);
3762 tlv_type = le32toh(tlv->type);
3763
3764 if (len < tlv_len) {
3765 printf("%s: invalid TLV len: %zd/%u\n",
3766 DEVNAME(sc), len, tlv_len);
3767 err = EINVAL;
3768 goto out;
3769 }
3770
3771 data += sizeof(*tlv);
3772
3773 switch (tlv_type) {
3774 case IWX_UCODE_TLV_PNVM_VERSION:
3775 if (tlv_len < sizeof(uint32_t))
3776 break;
3777
3778 sha1 = le32_to_cpup((const uint32_t *)data);
3779 break;
3780 case IWX_UCODE_TLV_HW_TYPE:
3781 if (tlv_len < 2 * sizeof(uint16_t))
3782 break;
3783
3784 if (hw_match)
3785 break;
3786
3787 mac_type = le16_to_cpup((const uint16_t *)data);
3788 rf_id = le16_to_cpup((const uint16_t *)(data +
3789 sizeof(uint16_t)));
3790
3791 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3792 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3793 hw_match = 1;
3794 break;
3795 case IWX_UCODE_TLV_SEC_RT: {
3796 const struct iwx_pnvm_section *section;
3797 uint32_t data_len;
3798
3799 section = (const void *)data;
3800 data_len = tlv_len - sizeof(*section);
3801
3802 /* TODO: remove, this is a deprecated separator */
3803 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3804 break;
3805
3806 tmp = malloc(size + data_len, M_DEVBUF,
3807 M_WAITOK | M_ZERO);
3808 if (tmp == NULL) {
3809 err = ENOMEM;
3810 goto out;
3811 }
3812 // XXX:misha pnvm_data is NULL and size is 0 at first pass
3813 memcpy(tmp, pnvm_data, size);
3814 memcpy(tmp + size, section->data, data_len);
3815 free(pnvm_data, M_DEVBUF);
3816 pnvm_data = tmp;
3817 size += data_len;
3818 break;
3819 }
3820 case IWX_UCODE_TLV_PNVM_SKU:
3821 /* New PNVM section started, stop parsing. */
3822 goto done;
3823 default:
3824 break;
3825 }
3826
3827 if (roundup(tlv_len, 4) > len)
3828 break;
3829 len -= roundup(tlv_len, 4);
3830 data += roundup(tlv_len, 4);
3831 }
3832 done:
3833 if (!hw_match || size == 0) {
3834 err = ENOENT;
3835 goto out;
3836 }
3837
3838 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3839 if (err) {
3840 printf("%s: could not allocate DMA memory for PNVM\n",
3841 DEVNAME(sc));
3842 err = ENOMEM;
3843 goto out;
3844 }
3845 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3846 iwx_ctxt_info_gen3_set_pnvm(sc);
3847 sc->sc_pnvm_ver = sha1;
3848 out:
3849 free(pnvm_data, M_DEVBUF);
3850 return err;
3851 }
3852
/*
 * Walk the TLV stream of a PNVM image and find the PNVM_SKU section whose
 * SKU ID matches this device.  Returns 0 once a matching section has been
 * successfully installed by iwx_pnvm_handle_section(), EINVAL on a
 * malformed TLV, or ENOENT if no section matched.
 */
static int
iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
{
	const struct iwx_ucode_tlv *tlv;

	while (len >= sizeof(*tlv)) {
		uint32_t tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);

		/* TLV payloads are padded to 32-bit alignment. */
		if (len < tlv_len || roundup(tlv_len, 4) > len)
			return EINVAL;

		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
			const struct iwx_sku_id *sku_id =
			    (const void *)(data + sizeof(*tlv));

			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);

			/*
			 * On a SKU match, hand the remainder of the image to
			 * the section parser; it stops at the next PNVM_SKU
			 * TLV, so only this device's section is consumed.
			 */
			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
			    iwx_pnvm_handle_section(sc, data, len) == 0)
				return 0;
		} else {
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);
		}
	}

	return ENOENT;
}
3890
/* Make AX210 firmware loading context point at PNVM image in DMA memory. */
static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
{
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;

	/* The PRPH scratch area is shared with firmware; fields are LE. */
	prph_scratch = sc->prph_scratch_dma.vaddr;
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);

	/* Flush the address/size update before the device reads it. */
	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
}
3906
3907 /*
3908 * Load platform-NVM (non-volatile-memory) data from the filesystem.
3909 * This data apparently contains regulatory information and affects device
3910 * channel configuration.
3911 * The SKU of AX210 devices tells us which PNVM file section is needed.
3912 * Pre-AX210 devices store NVM data onboard.
3913 */
3914 static int
iwx_load_pnvm(struct iwx_softc * sc)3915 iwx_load_pnvm(struct iwx_softc *sc)
3916 {
3917 const int wait_flags = IWX_PNVM_COMPLETE;
3918 int err = 0;
3919 const struct firmware *pnvm;
3920
3921 if (sc->sc_sku_id[0] == 0 &&
3922 sc->sc_sku_id[1] == 0 &&
3923 sc->sc_sku_id[2] == 0)
3924 return 0;
3925
3926 if (sc->sc_pnvm_name) {
3927 if (sc->pnvm_dma.vaddr == NULL) {
3928 IWX_UNLOCK(sc);
3929 pnvm = firmware_get(sc->sc_pnvm_name);
3930 if (pnvm == NULL) {
3931 printf("%s: could not read %s (error %d)\n",
3932 DEVNAME(sc), sc->sc_pnvm_name, err);
3933 IWX_LOCK(sc);
3934 return EINVAL;
3935 }
3936 sc->sc_pnvm = pnvm;
3937
3938 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3939 IWX_LOCK(sc);
3940 if (err && err != ENOENT) {
3941 return EINVAL;
3942 }
3943 } else
3944 iwx_ctxt_info_gen3_set_pnvm(sc);
3945 }
3946
3947 if (!iwx_nic_lock(sc)) {
3948 return EBUSY;
3949 }
3950
3951 /*
3952 * If we don't have a platform NVM file simply ask firmware
3953 * to proceed without it.
3954 */
3955
3956 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3957 IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3958
3959 /* Wait for the pnvm complete notification from firmware. */
3960 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3961 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3962 if (err)
3963 break;
3964 }
3965
3966 iwx_nic_unlock(sc);
3967
3968 return err;
3969 }
3970
3971 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)3972 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3973 {
3974 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3975 .valid = htole32(valid_tx_ant),
3976 };
3977
3978 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3979 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3980 }
3981
/*
 * Send the PHY configuration and default calibration trigger masks to
 * the firmware.
 */
static int
iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
{
	struct iwx_phy_cfg_cmd phy_cfg_cmd;

	/*
	 * NOTE(review): phy_cfg_cmd is not zero-initialized; any padding
	 * or fields beyond the three assigned below go to firmware
	 * uninitialized -- confirm the structure layout.
	 */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
3996
3997 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)3998 iwx_send_dqa_cmd(struct iwx_softc *sc)
3999 {
4000 struct iwx_dqa_enable_cmd dqa_cmd = {
4001 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4002 };
4003 uint32_t cmd_id;
4004
4005 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4006 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4007 }
4008
/*
 * Read the firmware image from the filesystem, upload it to the device,
 * and (on AX210 or later) load the PNVM image as well.  Called with the
 * softc lock held.
 */
static int
iwx_load_ucode_wait_alive(struct iwx_softc *sc)
{
	int err;

	/* iwx_read_firmware() may sleep (firmware_get); drop the lock. */
	IWX_UNLOCK(sc);
	err = iwx_read_firmware(sc);
	IWX_LOCK(sc);
	if (err)
		return err;

	err = iwx_start_fw(sc);
	if (err)
		return err;

	/* AX210 and later need a separately loaded PNVM image. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_load_pnvm(sc);
		if (err)
			return err;
	}

	iwx_post_alive(sc);

	return 0;
}
4034
/*
 * Boot the "init" firmware image, tell it we will issue NVM access
 * commands, wait for the init-complete notification, and optionally read
 * the NVM contents (which include the MAC address).  Called with the
 * softc lock held.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	/* Cleared here; set by the alive/init notification handlers. */
	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4107
4108 static int
iwx_config_ltr(struct iwx_softc * sc)4109 iwx_config_ltr(struct iwx_softc *sc)
4110 {
4111 struct iwx_ltr_config_cmd cmd = {
4112 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4113 };
4114
4115 if (!sc->sc_ltr_enabled)
4116 return 0;
4117
4118 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4119 }
4120
4121 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4122 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4123 bus_dma_segment_t *seg)
4124 {
4125 struct iwx_rx_data *data = &ring->data[idx];
4126
4127 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4128 struct iwx_rx_transfer_desc *desc = ring->desc;
4129 desc[idx].rbid = htole16(idx & 0xffff);
4130 desc[idx].addr = htole64((*seg).ds_addr);
4131 bus_dmamap_sync(ring->data_dmat, data->map,
4132 BUS_DMASYNC_PREWRITE);
4133 } else {
4134 ((uint64_t *)ring->desc)[idx] =
4135 htole64((*seg).ds_addr);
4136 bus_dmamap_sync(ring->data_dmat, data->map,
4137 BUS_DMASYNC_PREWRITE);
4138 }
4139 }
4140
/*
 * Allocate an mbuf cluster for RX ring slot 'idx', map it for DMA, and
 * update the RX descriptor to point at it.  Returns ENOBUFS if no mbuf
 * is available, or the bus_dma error code.
 *
 * NOTE(review): the 'size' parameter is unused; the cluster size is
 * fixed at IWX_RBUF_SIZE -- confirm callers expect this.
 */
static int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;
	bus_dma_segment_t seg;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	/* An existing mbuf means we are replacing a mapped buffer. */
	if (data->m != NULL) {
		bus_dmamap_unload(ring->data_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		/* The old buffer is already unloaded; we cannot recover. */
		if (fatal)
			panic("could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx, &seg);
	return 0;
}
4178
4179 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4180 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4181 struct iwx_rx_mpdu_desc *desc)
4182 {
4183 int energy_a, energy_b;
4184
4185 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4186 energy_a = desc->v3.energy_a;
4187 energy_b = desc->v3.energy_b;
4188 } else {
4189 energy_a = desc->v1.energy_a;
4190 energy_b = desc->v1.energy_b;
4191 }
4192 energy_a = energy_a ? -energy_a : -256;
4193 energy_b = energy_b ? -energy_b : -256;
4194 return MAX(energy_a, energy_b);
4195 }
4196
4197 static int
iwx_rxmq_get_chains(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4198 iwx_rxmq_get_chains(struct iwx_softc *sc,
4199 struct iwx_rx_mpdu_desc *desc)
4200 {
4201
4202 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4203 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4204 IWX_RATE_MCS_ANT_POS);
4205 else
4206 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4207 IWX_RATE_MCS_ANT_POS);
4208 }
4209
/*
 * Handle an RX_PHY notification: cache the PHY info so it can be paired
 * with the MPDU notification that follows it.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	/*
	 * NOTE(review): this syncs with PREREAD before reading data the
	 * device wrote, and uses a TX ring's data_dmat for an RX buffer
	 * map; POSTREAD on the RX tag would be expected -- confirm.
	 */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4222
4223 /*
4224 * Retrieve the average noise (in dBm) among receivers.
4225 */
4226 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4227 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4228 {
4229 int i, total, nbant, noise;
4230
4231 total = nbant = noise = 0;
4232 for (i = 0; i < 3; i++) {
4233 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4234 if (noise) {
4235 total += noise;
4236 nbant++;
4237 }
4238 }
4239
4240 /* There should be at least one antenna but check anyway. */
4241 return (nbant == 0) ? -127 : (total / nbant) - 107;
4242 }
4243
#if 0
/*
 * CCMP replay detection carried over from OpenBSD; disabled until
 * hardware-decrypt support is integrated with FreeBSD's net80211.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is be set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0] |
	    (uint64_t)ivp[1] << 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4303
/*
 * Check the hardware decryption status bits for a received protected
 * frame.  Returns 0 if the frame is acceptable, 1 if it must be dropped.
 */
static int
iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
{
	struct ieee80211_frame *wh;
	int ret = 0;
	uint8_t type, subtype;

	wh = mtod(m, struct ieee80211_frame *);

	/* Control frames are never encrypted. */
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	if (type == IEEE80211_FC0_TYPE_CTL) {
		return 0;
	}

	/*
	 * NOTE(review): this bitwise test matches any QoS subtype with the
	 * NODATA bit set, not only QoS-null -- confirm that is intended.
	 */
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
	if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
		return 0;
	}


	/* The type != CTL clause is redundant; CTL frames returned above. */
	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	    IEEE80211_FC0_TYPE_CTL)
	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
		/* Only CCMP-encrypted frames are accepted. */
		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
			ret = 1;
			goto out;
		}
		/* Check whether decryption was successful or not. */
		if ((rx_pkt_status &
		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
			ret = 1;
			goto out;
		}
	}
out:
	return ret;
}
4347
/*
 * Hand a received frame to net80211: resolve the channel, fill in the
 * radiotap header if a sniffer is active, and pass the mbuf up.  The
 * softc lock is dropped around the net80211 input path.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Map IEEE channel number to an index into ic_channels[]. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0	/* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* rate_n_flags encoding differs between firmware APIs. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			/* HT/VHT rate: radiotap MCS flag plus index. */
			tap->wr_rate = (0x80 | mcs);
		} else {
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate = 12; break;
			case 0xf: tap->wr_rate = 18; break;
			case 0x5: tap->wr_rate = 24; break;
			case 0x7: tap->wr_rate = 36; break;
			case 0x9: tap->wr_rate = 48; break;
			case 0xb: tap->wr_rate = 72; break;
			case 0x1: tap->wr_rate = 96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate = 0;
			}
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may take its own locks; drop ours. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4483
4484 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4485 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4486 size_t maxlen)
4487 {
4488 struct ieee80211com *ic = &sc->sc_ic;
4489 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4490 struct ieee80211_node *ni = vap->iv_bss;
4491 struct ieee80211_key *k;
4492 struct ieee80211_rx_stats rxs;
4493 struct iwx_rx_mpdu_desc *desc;
4494 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4495 int rssi;
4496 uint8_t chanidx;
4497 uint16_t phy_info;
4498 size_t desc_size;
4499 int pad = 0;
4500
4501 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4502 desc_size = sizeof(*desc);
4503 else
4504 desc_size = IWX_RX_DESC_SIZE_V1;
4505
4506 if (maxlen < desc_size) {
4507 m_freem(m);
4508 return; /* drop */
4509 }
4510
4511 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4512
4513 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4514 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4515 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4516 m_freem(m);
4517 return; /* drop */
4518 }
4519
4520 len = le16toh(desc->mpdu_len);
4521 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4522 /* Allow control frames in monitor mode. */
4523 if (len < sizeof(struct ieee80211_frame_cts)) {
4524 m_freem(m);
4525 return;
4526 }
4527
4528 } else if (len < sizeof(struct ieee80211_frame)) {
4529 m_freem(m);
4530 return;
4531 }
4532 if (len > maxlen - desc_size) {
4533 m_freem(m);
4534 return;
4535 }
4536
4537 // TODO: arithmetic on a pointer to void is a GNU extension
4538 m->m_data = (char *)pktdata + desc_size;
4539 m->m_pkthdr.len = m->m_len = len;
4540
4541 /* Account for padding following the frame header. */
4542 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4543 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4544 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4545 if (type == IEEE80211_FC0_TYPE_CTL) {
4546 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4547 case IEEE80211_FC0_SUBTYPE_CTS:
4548 hdrlen = sizeof(struct ieee80211_frame_cts);
4549 break;
4550 case IEEE80211_FC0_SUBTYPE_ACK:
4551 hdrlen = sizeof(struct ieee80211_frame_ack);
4552 break;
4553 default:
4554 hdrlen = sizeof(struct ieee80211_frame_min);
4555 break;
4556 }
4557 } else
4558 hdrlen = ieee80211_hdrsize(wh);
4559
4560 if ((le16toh(desc->status) &
4561 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4562 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4563 // CCMP header length
4564 hdrlen += 8;
4565 }
4566
4567 memmove(m->m_data + 2, m->m_data, hdrlen);
4568 m_adj(m, 2);
4569
4570 }
4571
4572 if ((le16toh(desc->status) &
4573 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4574 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4575 pad = 1;
4576 }
4577
4578 // /*
4579 // * Hardware de-aggregates A-MSDUs and copies the same MAC header
4580 // * in place for each subframe. But it leaves the 'A-MSDU present'
4581 // * bit set in the frame header. We need to clear this bit ourselves.
4582 // * (XXX This workaround is not required on AX200/AX201 devices that
4583 // * have been tested by me, but it's unclear when this problem was
4584 // * fixed in the hardware. It definitely affects the 9k generation.
4585 // * Leaving this in place for now since some 9k/AX200 hybrids seem
4586 // * to exist that we may eventually add support for.)
4587 // *
4588 // * And we must allow the same CCMP PN for subframes following the
4589 // * first subframe. Otherwise they would be discarded as replays.
4590 // */
4591 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4592 DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
4593 // struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4594 // uint8_t subframe_idx = (desc->amsdu_info &
4595 // IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4596 // if (subframe_idx > 0)
4597 // rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4598 // if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4599 // m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4600 // struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4601 // struct ieee80211_qosframe_addr4 *);
4602 // qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4603 // } else if (ieee80211_has_qos(wh) &&
4604 // m->m_len >= sizeof(struct ieee80211_qosframe)) {
4605 // struct ieee80211_qosframe *qwh = mtod(m,
4606 // struct ieee80211_qosframe *);
4607 // qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4608 // }
4609 }
4610
4611 /*
4612 * Verify decryption before duplicate detection. The latter uses
4613 * the TID supplied in QoS frame headers and this TID is implicitly
4614 * verified as part of the CCMP nonce.
4615 */
4616 k = ieee80211_crypto_get_txkey(ni, m);
4617 if (k != NULL &&
4618 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4619 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4620 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4621 m_freem(m);
4622 return;
4623 }
4624
4625 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4626 rate_n_flags = le32toh(desc->v3.rate_n_flags);
4627 chanidx = desc->v3.channel;
4628 device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4629 } else {
4630 rate_n_flags = le32toh(desc->v1.rate_n_flags);
4631 chanidx = desc->v1.channel;
4632 device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4633 }
4634
4635 phy_info = le16toh(desc->phy_info);
4636
4637 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4638 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4639 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
4640
4641 memset(&rxs, 0, sizeof(rxs));
4642 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4643 rxs.r_flags |= IEEE80211_R_BAND;
4644 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4645 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4646
4647 rxs.c_ieee = chanidx;
4648 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4649 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4650 rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4651 rxs.c_rx_tsf = device_timestamp;
4652 rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
4653 if (rxs.c_chain != 0)
4654 rxs.r_flags |= IEEE80211_R_C_CHAIN;
4655
4656 /* rssi is in 1/2db units */
4657 rxs.c_rssi = rssi * 2;
4658 rxs.c_nf = sc->sc_noise;
4659
4660 if (pad) {
4661 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4662 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4663 }
4664
4665 if (ieee80211_add_rx_params(m, &rxs) == 0) {
4666 printf("%s: ieee80211_add_rx_params failed\n", __func__);
4667 return;
4668 }
4669
4670 ieee80211_add_rx_params(m, &rxs);
4671
4672 #if 0
4673 if (iwx_rx_reorder(sc, m, chanidx, desc,
4674 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4675 rate_n_flags, device_timestamp, &rxi, ml))
4676 return;
4677 #endif
4678
4679 if (pad) {
4680 #define TRIM 8
4681 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4682 hdrlen = ieee80211_hdrsize(wh);
4683 memmove(m->m_data + TRIM, m->m_data, hdrlen);
4684 m_adj(m, TRIM);
4685 #undef TRIM
4686 }
4687
4688 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4689 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4690 rate_n_flags, device_timestamp, rssi);
4691 }
4692
/*
 * Reset a transmit frame descriptor (TFD) back to a single TB, zeroing
 * the TBs that carried frame data so the hardware cannot reuse stale
 * buffer pointers.
 */
static void
iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
{
	struct iwx_tfh_tfd *desc = &ring->desc[idx];
	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
	int i;

	/* First TB is never cleared - it is bidirectional DMA data. */
	for (i = 1; i < num_tbs; i++) {
		struct iwx_tfh_tb *tb = &desc->tbs[i];
		memset(tb, 0, sizeof(*tb));
	}
	desc->num_tbs = htole16(1);

	/* Flush the cleared descriptor before the device sees it. */
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
}
4710
/*
 * Complete a transmitted frame: tear down its DMA mapping and hand the
 * mbuf back to net80211 (which frees it and updates node statistics).
 */
static void
iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
    struct iwx_tx_data *txd)
{
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	/* status 0: report success; per-frame failure is counted elsewhere. */
	ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
	txd->m = NULL;
	txd->in = NULL;
}
4722
/*
 * Reclaim all TX ring entries up to (but not including) hardware index
 * 'idx'.  On iwx(4) hardware the firmware-reported SSN is a ring index,
 * so every slot before it has been processed.
 */
static void
iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
{
	struct iwx_tx_data *txd;

	while (ring->tail_hw != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL) {
			iwx_clear_tx_desc(sc, ring, ring->tail);
			/* Zero the byte count so the scheduler skips it. */
			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
			iwx_txd_done(sc, ring, txd);
			ring->queued--;
			if (ring->queued < 0)
				panic("caught negative queue count");
		}
		/* The software ring and hardware ring may differ in size. */
		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
	}
}
4742
/*
 * Handle a TX response notification from firmware: update interface
 * counters for the completed frame and reclaim ring entries up to the
 * SSN reported in the response.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Aggregation responses carry per-frame status plus an SSN. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* A response arrived, so the queue is not stuck. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4804
4805 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4806 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4807 {
4808 if (ring->queued < iwx_lomark) {
4809 sc->qfullmsk &= ~(1 << ring->qid);
4810 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4811 /*
4812 * Well, we're in interrupt context, but then again
4813 * I guess net80211 does all sorts of stunts in
4814 * interrupt context, so maybe this is no biggie.
4815 */
4816 iwx_start(sc);
4817 }
4818 }
4819 }
4820
4821 static void
iwx_rx_compressed_ba(struct iwx_softc * sc,struct iwx_rx_packet * pkt)4822 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4823 {
4824 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4825 struct ieee80211com *ic = &sc->sc_ic;
4826 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4827 struct iwx_node *in = IWX_NODE(vap->iv_bss);
4828 struct ieee80211_node *ni = &in->in_ni;
4829 struct iwx_tx_ring *ring;
4830 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4831 int qid;
4832
4833 // if (ic->ic_state != IEEE80211_S_RUN)
4834 // return;
4835
4836 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4837 return;
4838
4839 if (ba_res->sta_id != IWX_STATION_ID)
4840 return;
4841
4842 in = (void *)ni;
4843
4844 tfd_cnt = le16toh(ba_res->tfd_cnt);
4845 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4846 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4847 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4848 sizeof(ba_res->tfd[0]) * tfd_cnt))
4849 return;
4850
4851 for (i = 0; i < tfd_cnt; i++) {
4852 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4853 uint8_t tid;
4854
4855 tid = ba_tfd->tid;
4856 if (tid >= nitems(sc->aggqid))
4857 continue;
4858
4859 qid = sc->aggqid[tid];
4860 if (qid != htole16(ba_tfd->q_num))
4861 continue;
4862
4863 ring = &sc->txq[qid];
4864
4865 #if 0
4866 ba = &ni->ni_tx_ba[tid];
4867 if (ba->ba_state != IEEE80211_BA_AGREED)
4868 continue;
4869 #endif
4870 idx = le16toh(ba_tfd->tfd_index);
4871 sc->sc_tx_timer[qid] = 0;
4872 iwx_txq_advance(sc, ring, idx);
4873 iwx_clear_oactive(sc, ring);
4874 }
4875 }
4876
4877 static void
iwx_rx_bmiss(struct iwx_softc * sc,struct iwx_rx_packet * pkt,struct iwx_rx_data * data)4878 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4879 struct iwx_rx_data *data)
4880 {
4881 struct ieee80211com *ic = &sc->sc_ic;
4882 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4883 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4884 uint32_t missed;
4885
4886 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4887 (vap->iv_state != IEEE80211_S_RUN))
4888 return;
4889
4890 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4891 BUS_DMASYNC_POSTREAD);
4892
4893 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4894 if (missed > vap->iv_bmissthreshold) {
4895 ieee80211_beacon_miss(ic);
4896 }
4897
4898 }
4899
4900 static int
iwx_binding_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action)4901 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4902 {
4903 struct iwx_binding_cmd cmd;
4904 struct ieee80211com *ic = &sc->sc_ic;
4905 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4906 struct iwx_vap *ivp = IWX_VAP(vap);
4907 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
4908 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4909 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4910 uint32_t status;
4911
4912 if (action == IWX_FW_CTXT_ACTION_ADD && active)
4913 panic("binding already added");
4914 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4915 panic("binding already removed");
4916
4917 if (phyctxt == NULL) /* XXX race with iwx_stop() */
4918 return EINVAL;
4919
4920 memset(&cmd, 0, sizeof(cmd));
4921
4922 cmd.id_and_color
4923 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4924 cmd.action = htole32(action);
4925 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4926
4927 cmd.macs[0] = htole32(mac_id);
4928 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4929 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4930
4931 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4932 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4933 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4934 else
4935 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4936
4937 status = 0;
4938 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4939 &cmd, &status);
4940 if (err == 0 && status != 0)
4941 err = EIO;
4942
4943 return err;
4944 }
4945
4946 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4947 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4948 {
4949 int ctlchan = ieee80211_chan2ieee(ic, chan);
4950 int midpoint = chan->ic_vht_ch_freq1;
4951
4952 /*
4953 * The FW is expected to check the control channel position only
4954 * when in HT/VHT and the channel width is not 20MHz. Return
4955 * this value as the default one:
4956 */
4957 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4958
4959 switch (ctlchan - midpoint) {
4960 case -6:
4961 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
4962 break;
4963 case -2:
4964 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4965 break;
4966 case 2:
4967 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4968 break;
4969 case 6:
4970 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
4971 break;
4972 default:
4973 break;
4974 }
4975
4976 return pos;
4977 }
4978
4979 static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc * sc,struct iwx_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint8_t sco,uint8_t vht_chan_width,int cmdver)4980 iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4981 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
4982 uint8_t vht_chan_width, int cmdver)
4983 {
4984 struct ieee80211com *ic = &sc->sc_ic;
4985 struct iwx_phy_context_cmd_uhb cmd;
4986 uint8_t active_cnt, idle_cnt;
4987 struct ieee80211_channel *chan = ctxt->channel;
4988
4989 memset(&cmd, 0, sizeof(cmd));
4990 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4991 ctxt->color));
4992 cmd.action = htole32(action);
4993
4994 if (IEEE80211_IS_CHAN_2GHZ(chan) ||
4995 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4996 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4997 else
4998 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4999
5000 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5001 IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5002 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5003
5004 if (IEEE80211_IS_CHAN_VHT80(chan)) {
5005 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5006 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5007 } else if (IEEE80211_IS_CHAN_HT40(chan)) {
5008 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5009 if (IEEE80211_IS_CHAN_HT40D(chan))
5010 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5011 else
5012 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5013 } else {
5014 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5015 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5016 }
5017
5018 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5019 IWX_RLC_CONFIG_CMD) != 2) {
5020 idle_cnt = chains_static;
5021 active_cnt = chains_dynamic;
5022 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5023 IWX_PHY_RX_CHAIN_VALID_POS);
5024 cmd.rxchain_info |= htole32(idle_cnt <<
5025 IWX_PHY_RX_CHAIN_CNT_POS);
5026 cmd.rxchain_info |= htole32(active_cnt <<
5027 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5028 }
5029
5030 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5031 }
5032
#if 0
/*
 * Disabled: non-UHB variant of the phy context command. Only the UHB
 * (ultra-high-band) layout is used on currently supported hardware;
 * see iwx_phy_ctxt_cmd() and iwx_phy_ctxt_cmd_uhb_v3_v4().
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5094
5095 static int
iwx_phy_ctxt_cmd(struct iwx_softc * sc,struct iwx_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint32_t apply_time,uint8_t sco,uint8_t vht_chan_width)5096 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5097 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5098 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5099 {
5100 int cmdver;
5101
5102 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5103 if (cmdver != 3 && cmdver != 4) {
5104 printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5105 DEVNAME(sc));
5106 return ENOTSUP;
5107 }
5108
5109 /*
5110 * Intel increased the size of the fw_channel_info struct and neglected
5111 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5112 * member in the middle.
5113 * To keep things simple we use a separate function to handle the larger
5114 * variant of the phy context command.
5115 */
5116 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5117 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5118 chains_dynamic, action, sco, vht_chan_width, cmdver);
5119 } else
5120 panic("Unsupported old hardware contact thj@");
5121
5122 #if 0
5123 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5124 action, sco, vht_chan_width, cmdver);
5125 #endif
5126 }
5127
5128 static int
iwx_send_cmd(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5129 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5130 {
5131 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5132 struct iwx_tfh_tfd *desc;
5133 struct iwx_tx_data *txdata;
5134 struct iwx_device_cmd *cmd;
5135 struct mbuf *m;
5136 bus_addr_t paddr;
5137 uint64_t addr;
5138 int err = 0, i, paylen, off/*, s*/;
5139 int idx, code, async, group_id;
5140 size_t hdrlen, datasz;
5141 uint8_t *data;
5142 int generation = sc->sc_generation;
5143 bus_dma_segment_t seg[10];
5144 int nsegs;
5145
5146 code = hcmd->id;
5147 async = hcmd->flags & IWX_CMD_ASYNC;
5148 idx = ring->cur;
5149
5150 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5151 paylen += hcmd->len[i];
5152 }
5153
5154 /* If this command waits for a response, allocate response buffer. */
5155 hcmd->resp_pkt = NULL;
5156 if (hcmd->flags & IWX_CMD_WANT_RESP) {
5157 uint8_t *resp_buf;
5158 KASSERT(!async, ("async command want response"));
5159 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5160 ("wrong pkt len 1"));
5161 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5162 ("wrong pkt len 2"));
5163 if (sc->sc_cmd_resp_pkt[idx] != NULL)
5164 return ENOSPC;
5165 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5166 M_NOWAIT | M_ZERO);
5167 if (resp_buf == NULL)
5168 return ENOMEM;
5169 sc->sc_cmd_resp_pkt[idx] = resp_buf;
5170 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5171 } else {
5172 sc->sc_cmd_resp_pkt[idx] = NULL;
5173 }
5174
5175 desc = &ring->desc[idx];
5176 txdata = &ring->data[idx];
5177
5178 /*
5179 * XXX Intel inside (tm)
5180 * Firmware API versions >= 50 reject old-style commands in
5181 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5182 * that such commands were in the LONG_GROUP instead in order
5183 * for firmware to accept them.
5184 */
5185 if (iwx_cmd_groupid(code) == 0) {
5186 code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5187 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5188 } else
5189 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5190
5191 group_id = iwx_cmd_groupid(code);
5192
5193 hdrlen = sizeof(cmd->hdr_wide);
5194 datasz = sizeof(cmd->data_wide);
5195
5196 if (paylen > datasz) {
5197 /* Command is too large to fit in pre-allocated space. */
5198 size_t totlen = hdrlen + paylen;
5199 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5200 printf("%s: firmware command too long (%zd bytes)\n",
5201 DEVNAME(sc), totlen);
5202 err = EINVAL;
5203 goto out;
5204 }
5205 if (totlen > IWX_RBUF_SIZE)
5206 panic("totlen > IWX_RBUF_SIZE");
5207 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5208 if (m == NULL) {
5209 printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5210 DEVNAME(sc), IWX_RBUF_SIZE);
5211 err = ENOMEM;
5212 goto out;
5213 }
5214 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5215 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5216 seg, &nsegs, BUS_DMA_NOWAIT);
5217 if (nsegs > 20)
5218 panic("nsegs > 20");
5219 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5220 if (err) {
5221 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5222 DEVNAME(sc), totlen);
5223 m_freem(m);
5224 goto out;
5225 }
5226 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5227 cmd = mtod(m, struct iwx_device_cmd *);
5228 paddr = seg[0].ds_addr;
5229 } else {
5230 cmd = &ring->cmd[idx];
5231 paddr = txdata->cmd_paddr;
5232 }
5233
5234 memset(cmd, 0, sizeof(*cmd));
5235 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5236 cmd->hdr_wide.group_id = group_id;
5237 cmd->hdr_wide.qid = ring->qid;
5238 cmd->hdr_wide.idx = idx;
5239 cmd->hdr_wide.length = htole16(paylen);
5240 cmd->hdr_wide.version = iwx_cmd_version(code);
5241 data = cmd->data_wide;
5242
5243 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5244 if (hcmd->len[i] == 0)
5245 continue;
5246 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5247 off += hcmd->len[i];
5248 }
5249 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5250
5251 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5252 addr = htole64(paddr);
5253 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5254 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5255 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5256 paylen));
5257 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5258 IWX_FIRST_TB_SIZE);
5259 addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5260 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5261 desc->num_tbs = htole16(2);
5262 } else
5263 desc->num_tbs = htole16(1);
5264
5265 if (paylen > datasz) {
5266 bus_dmamap_sync(ring->data_dmat, txdata->map,
5267 BUS_DMASYNC_PREWRITE);
5268 } else {
5269 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5270 BUS_DMASYNC_PREWRITE);
5271 }
5272 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5273 BUS_DMASYNC_PREWRITE);
5274
5275 /* Kick command ring. */
5276 ring->queued++;
5277 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5278 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5279 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5280 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5281
5282 if (!async) {
5283 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5284 if (err == 0) {
5285 /* if hardware is no longer up, return error */
5286 if (generation != sc->sc_generation) {
5287 err = ENXIO;
5288 goto out;
5289 }
5290
5291 /* Response buffer will be freed in iwx_free_resp(). */
5292 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5293 sc->sc_cmd_resp_pkt[idx] = NULL;
5294 } else if (generation == sc->sc_generation) {
5295 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5296 sc->sc_cmd_resp_pkt[idx] = NULL;
5297 }
5298 }
5299 out:
5300 return err;
5301 }
5302
5303 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5304 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5305 uint16_t len, const void *data)
5306 {
5307 struct iwx_host_cmd cmd = {
5308 .id = id,
5309 .len = { len, },
5310 .data = { data, },
5311 .flags = flags,
5312 };
5313
5314 return iwx_send_cmd(sc, &cmd);
5315 }
5316
5317 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5318 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5319 uint32_t *status)
5320 {
5321 struct iwx_rx_packet *pkt;
5322 struct iwx_cmd_response *resp;
5323 int err, resp_len;
5324
5325 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5326 cmd->flags |= IWX_CMD_WANT_RESP;
5327 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5328
5329 err = iwx_send_cmd(sc, cmd);
5330 if (err)
5331 return err;
5332
5333 pkt = cmd->resp_pkt;
5334 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5335 return EIO;
5336
5337 resp_len = iwx_rx_packet_payload_len(pkt);
5338 if (resp_len != sizeof(*resp)) {
5339 iwx_free_resp(sc, cmd);
5340 return EIO;
5341 }
5342
5343 resp = (void *)pkt->data;
5344 *status = le32toh(resp->status);
5345 iwx_free_resp(sc, cmd);
5346 return err;
5347 }
5348
5349 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5350 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5351 const void *data, uint32_t *status)
5352 {
5353 struct iwx_host_cmd cmd = {
5354 .id = id,
5355 .len = { len, },
5356 .data = { data, },
5357 };
5358
5359 return iwx_send_cmd_status(sc, &cmd, status);
5360 }
5361
/*
 * Release the response buffer attached to a synchronous host command.
 * Only valid for commands submitted with IWX_CMD_WANT_RESP; the buffer
 * was handed over by iwx_send_cmd().
 */
static void
iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
	    ("hcmd flags !IWX_CMD_WANT_RESP"));
	free(hcmd->resp_pkt, M_DEVBUF);
	hcmd->resp_pkt = NULL;
}
5370
5371 static void
iwx_cmd_done(struct iwx_softc * sc,int qid,int idx,int code)5372 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5373 {
5374 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5375 struct iwx_tx_data *data;
5376
5377 if (qid != IWX_DQA_CMD_QUEUE) {
5378 return; /* Not a command ack. */
5379 }
5380
5381 data = &ring->data[idx];
5382
5383 if (data->m != NULL) {
5384 bus_dmamap_sync(ring->data_dmat, data->map,
5385 BUS_DMASYNC_POSTWRITE);
5386 bus_dmamap_unload(ring->data_dmat, data->map);
5387 m_freem(data->m);
5388 data->m = NULL;
5389 }
5390 wakeup(&ring->desc[idx]);
5391
5392 DPRINTF(("%s: command 0x%x done\n", __func__, code));
5393 if (ring->queued == 0) {
5394 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5395 DEVNAME(sc), code));
5396 } else if (ring->queued > 0)
5397 ring->queued--;
5398 }
5399
5400 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5401 iwx_fw_rateidx_ofdm(uint8_t rval)
5402 {
5403 /* Firmware expects indices which match our 11a rate set. */
5404 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5405 int i;
5406
5407 for (i = 0; i < rs->rs_nrates; i++) {
5408 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5409 return i;
5410 }
5411
5412 return 0;
5413 }
5414
5415 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5416 iwx_fw_rateidx_cck(uint8_t rval)
5417 {
5418 /* Firmware expects indices which match our 11b rate set. */
5419 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5420 int i;
5421
5422 for (i = 0; i < rs->rs_nrates; i++) {
5423 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5424 return i;
5425 }
5426
5427 return 0;
5428 }
5429
5430 static int
iwx_min_basic_rate(struct ieee80211com * ic)5431 iwx_min_basic_rate(struct ieee80211com *ic)
5432 {
5433 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5434 struct ieee80211_node *ni = vap->iv_bss;
5435 struct ieee80211_rateset *rs = &ni->ni_rates;
5436 struct ieee80211_channel *c = ni->ni_chan;
5437 int i, min, rval;
5438
5439 min = -1;
5440
5441 if (c == IEEE80211_CHAN_ANYC) {
5442 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5443 return -1;
5444 }
5445
5446 for (i = 0; i < rs->rs_nrates; i++) {
5447 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5448 continue;
5449 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5450 if (min == -1)
5451 min = rval;
5452 else if (rval < min)
5453 min = rval;
5454 }
5455
5456 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5457 if (min == -1)
5458 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5459
5460 return min;
5461 }
5462
5463 /*
5464 * Determine the Tx command flags and Tx rate+flags to use.
5465 * Return the selected Tx rate.
5466 */
5467 static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc * sc,struct iwx_node * in,struct ieee80211_frame * wh,uint16_t * flags,uint32_t * rate_n_flags,struct mbuf * m)5468 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5469 struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5470 struct mbuf *m)
5471 {
5472 struct ieee80211com *ic = &sc->sc_ic;
5473 struct ieee80211_node *ni = &in->in_ni;
5474 struct ieee80211_rateset *rs = &ni->ni_rates;
5475 const struct iwx_rate *rinfo = NULL;
5476 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5477 int ridx = iwx_min_basic_rate(ic);
5478 int min_ridx, rate_flags;
5479 uint8_t rval;
5480
5481 /* We're in the process of clearing the node, no channel already */
5482 if (ridx == -1)
5483 return NULL;
5484
5485 min_ridx = iwx_rval2ridx(ridx);
5486
5487 *flags = 0;
5488
5489 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5490 type != IEEE80211_FC0_TYPE_DATA) {
5491 /* for non-data, use the lowest supported rate */
5492 ridx = min_ridx;
5493 *flags |= IWX_TX_FLAGS_CMD_RATE;
5494 } else if (ni->ni_flags & IEEE80211_NODE_HT) {
5495 ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5496 & ~IEEE80211_RATE_MCS];
5497 } else {
5498 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5499 & IEEE80211_RATE_VAL);
5500 ridx = iwx_rval2ridx(rval);
5501 if (ridx < min_ridx)
5502 ridx = min_ridx;
5503 }
5504
5505 if (m->m_flags & M_EAPOL)
5506 *flags |= IWX_TX_FLAGS_HIGH_PRI;
5507
5508 rinfo = &iwx_rates[ridx];
5509
5510 /*
5511 * Do not fill rate_n_flags if firmware controls the Tx rate.
5512 * For data frames we rely on Tx rate scaling in firmware by default.
5513 */
5514 if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5515 *rate_n_flags = 0;
5516 return rinfo;
5517 }
5518
5519 /*
5520 * Forcing a CCK/OFDM legacy rate is important for management frames.
5521 * Association will only succeed if we do this correctly.
5522 */
5523
5524 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5525 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5526 rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5527 if (IWX_RIDX_IS_CCK(ridx)) {
5528 if (sc->sc_rate_n_flags_version >= 2)
5529 rate_flags |= IWX_RATE_MCS_CCK_MSK;
5530 else
5531 rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5532 } else if (sc->sc_rate_n_flags_version >= 2)
5533 rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5534
5535 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5536 & IEEE80211_RATE_VAL);
5537 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5538 rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5539
5540 if (sc->sc_rate_n_flags_version >= 2) {
5541 if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5542 rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5543 IWX_RATE_LEGACY_RATE_MSK);
5544 } else {
5545 rate_flags |= (iwx_fw_rateidx_cck(rval) &
5546 IWX_RATE_LEGACY_RATE_MSK);
5547 }
5548 } else
5549 rate_flags |= rinfo->plcp;
5550
5551 *rate_n_flags = rate_flags;
5552 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5553 __func__, __LINE__,*flags);
5554 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5555 __func__, __LINE__, *rate_n_flags);
5556
5557 if (sc->sc_debug & IWX_DEBUG_TXRATE)
5558 print_ratenflags(__func__, __LINE__,
5559 *rate_n_flags, sc->sc_rate_n_flags_version);
5560
5561 return rinfo;
5562 }
5563
5564 static void
iwx_tx_update_byte_tbl(struct iwx_softc * sc,struct iwx_tx_ring * txq,int idx,uint16_t byte_cnt,uint16_t num_tbs)5565 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
5566 int idx, uint16_t byte_cnt, uint16_t num_tbs)
5567 {
5568 uint8_t filled_tfd_size, num_fetch_chunks;
5569 uint16_t len = byte_cnt;
5570 uint16_t bc_ent;
5571
5572 filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5573 num_tbs * sizeof(struct iwx_tfh_tb);
5574 /*
5575 * filled_tfd_size contains the number of filled bytes in the TFD.
5576 * Dividing it by 64 will give the number of chunks to fetch
5577 * to SRAM- 0 for one chunk, 1 for 2 and so on.
5578 * If, for example, TFD contains only 3 TBs then 32 bytes
5579 * of the TFD are used, and only one chunk of 64 bytes should
5580 * be fetched
5581 */
5582 num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
5583
5584 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5585 struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
5586 /* Starting from AX210, the HW expects bytes */
5587 bc_ent = htole16(len | (num_fetch_chunks << 14));
5588 scd_bc_tbl[idx].tfd_offset = bc_ent;
5589 } else {
5590 struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5591 /* Before AX210, the HW expects DW */
5592 len = howmany(len, 4);
5593 bc_ent = htole16(len | (num_fetch_chunks << 12));
5594 scd_bc_tbl->tfd_offset[idx] = bc_ent;
5595 }
5596
5597 bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
5598 }
5599
5600 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5601 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5602 {
5603 struct ieee80211com *ic = &sc->sc_ic;
5604 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5605 struct iwx_node *in = (void *)ni;
5606 struct iwx_tx_ring *ring;
5607 struct iwx_tx_data *data;
5608 struct iwx_tfh_tfd *desc;
5609 struct iwx_device_cmd *cmd;
5610 struct ieee80211_frame *wh;
5611 struct ieee80211_key *k = NULL;
5612 const struct iwx_rate *rinfo;
5613 uint64_t paddr;
5614 u_int hdrlen;
5615 uint32_t rate_n_flags;
5616 uint16_t num_tbs, flags, offload_assist = 0;
5617 uint8_t type, subtype;
5618 int i, totlen, err, pad, qid;
5619 #define IWM_MAX_SCATTER 20
5620 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5621 int nsegs;
5622 struct mbuf *m1;
5623 size_t txcmd_size;
5624
5625 wh = mtod(m, struct ieee80211_frame *);
5626 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5627 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5628 hdrlen = ieee80211_anyhdrsize(wh);
5629
5630 qid = sc->first_data_qid;
5631
5632 /* Put QoS frames on the data queue which maps to their TID. */
5633 if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
5634 uint16_t qos = ieee80211_gettid(wh);
5635 uint8_t tid = qos & IEEE80211_QOS_TID;
5636 #if 0
5637 /*
5638 * XXX-THJ: TODO when we enable ba we need to manage the
5639 * mappings
5640 */
5641 struct ieee80211_tx_ba *ba;
5642 ba = &ni->ni_tx_ba[tid];
5643
5644 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5645 type == IEEE80211_FC0_TYPE_DATA &&
5646 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5647 subtype != IEEE80211_FC0_SUBTYPE_BAR &&
5648 sc->aggqid[tid] != 0 /*&&
5649 ba->ba_state == IEEE80211_BA_AGREED*/) {
5650 qid = sc->aggqid[tid];
5651 #else
5652 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5653 type == IEEE80211_FC0_TYPE_DATA &&
5654 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5655 sc->aggqid[tid] != 0) {
5656 qid = sc->aggqid[tid];
5657 #endif
5658 }
5659 }
5660
5661 ring = &sc->txq[qid];
5662 desc = &ring->desc[ring->cur];
5663 memset(desc, 0, sizeof(*desc));
5664 data = &ring->data[ring->cur];
5665
5666 cmd = &ring->cmd[ring->cur];
5667 cmd->hdr.code = IWX_TX_CMD;
5668 cmd->hdr.flags = 0;
5669 cmd->hdr.qid = ring->qid;
5670 cmd->hdr.idx = ring->cur;
5671
5672 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5673 if (rinfo == NULL)
5674 return EINVAL;
5675
5676 if (ieee80211_radiotap_active_vap(vap)) {
5677 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5678
5679 tap->wt_flags = 0;
5680 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5681 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5682 tap->wt_rate = rinfo->rate;
5683 if (k != NULL)
5684 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5685 ieee80211_radiotap_tx(vap, m);
5686 }
5687
5688 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5689 k = ieee80211_crypto_get_txkey(ni, m);
5690 if (k == NULL) {
5691 printf("%s: k is NULL!\n", __func__);
5692 m_freem(m);
5693 return (ENOBUFS);
5694 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5695 k->wk_keytsc++;
5696 } else {
5697 k->wk_cipher->ic_encap(k, m);
5698
5699 /* 802.11 headers may have moved */
5700 wh = mtod(m, struct ieee80211_frame *);
5701 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5702 }
5703 } else
5704 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5705
5706 totlen = m->m_pkthdr.len;
5707
5708 if (hdrlen & 3) {
5709 /* First segment length must be a multiple of 4. */
5710 pad = 4 - (hdrlen & 3);
5711 offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5712 } else
5713 pad = 0;
5714
5715 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5716 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5717 memset(tx, 0, sizeof(*tx));
5718 tx->len = htole16(totlen);
5719 tx->offload_assist = htole32(offload_assist);
5720 tx->flags = htole16(flags);
5721 tx->rate_n_flags = htole32(rate_n_flags);
5722 memcpy(tx->hdr, wh, hdrlen);
5723 txcmd_size = sizeof(*tx);
5724 } else {
5725 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5726 memset(tx, 0, sizeof(*tx));
5727 tx->len = htole16(totlen);
5728 tx->offload_assist = htole16(offload_assist);
5729 tx->flags = htole32(flags);
5730 tx->rate_n_flags = htole32(rate_n_flags);
5731 memcpy(tx->hdr, wh, hdrlen);
5732 txcmd_size = sizeof(*tx);
5733 }
5734
5735 /* Trim 802.11 header. */
5736 m_adj(m, hdrlen);
5737
5738 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5739 &nsegs, BUS_DMA_NOWAIT);
5740 if (err && err != EFBIG) {
5741 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5742 m_freem(m);
5743 return err;
5744 }
5745 if (err) {
5746 /* Too many DMA segments, linearize mbuf. */
5747 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5748 if (m1 == NULL) {
5749 printf("%s: could not defrag mbufs\n", __func__);
5750 m_freem(m);
5751 return (ENOBUFS);
5752 }
5753 m = m1;
5754 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5755 segs, &nsegs, BUS_DMA_NOWAIT);
5756 if (err) {
5757 printf("%s: can't map mbuf (error %d)\n", __func__,
5758 err);
5759 m_freem(m);
5760 return (err);
5761 }
5762 }
5763 data->m = m;
5764 data->in = in;
5765
5766 /* Fill TX descriptor. */
5767 num_tbs = 2 + nsegs;
5768 desc->num_tbs = htole16(num_tbs);
5769
5770 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5771 paddr = htole64(data->cmd_paddr);
5772 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5773 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5774 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5775 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5776 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5777 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5778 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5779
5780 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5781 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5782
5783 /* Other DMA segments are for data payload. */
5784 for (i = 0; i < nsegs; i++) {
5785 seg = &segs[i];
5786 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5787 paddr = htole64(seg->ds_addr);
5788 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5789 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5790 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5791 }
5792
5793 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5794 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5795 BUS_DMASYNC_PREWRITE);
5796 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5797 BUS_DMASYNC_PREWRITE);
5798
5799 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5800
5801 /* Kick TX ring. */
5802 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5803 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5804 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5805
5806 /* Mark TX ring as full if we reach a certain threshold. */
5807 if (++ring->queued > iwx_himark) {
5808 sc->qfullmsk |= 1 << ring->qid;
5809 }
5810
5811 sc->sc_tx_timer[ring->qid] = 15;
5812
5813 return 0;
5814 }
5815
/*
 * Ask the firmware to flush pending frames for the given TIDs of a
 * station, then advance the affected software Tx rings to the
 * firmware's post-flush read pointers.  Returns 0 or an errno value.
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* Sanity-check that the response matches the station we flushed. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/*
	 * For each queue the firmware reports as flushed, catch our
	 * ring state up to the firmware's read pointer.
	 */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		/* Ignore queue numbers we do not track. */
		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		/* Skip stale entries whose TID no longer matches the ring. */
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
5886
5887 #define IWX_FLUSH_WAIT_MS 2000
5888
5889 static int
5890 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5891 {
5892 struct iwx_add_sta_cmd cmd;
5893 int err;
5894 uint32_t status;
5895
5896 memset(&cmd, 0, sizeof(cmd));
5897 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5898 in->in_color));
5899 cmd.sta_id = IWX_STATION_ID;
5900 cmd.add_modify = IWX_STA_MODE_MODIFY;
5901 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5902 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5903
5904 status = IWX_ADD_STA_SUCCESS;
5905 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5906 sizeof(cmd), &cmd, &status);
5907 if (err) {
5908 printf("%s: could not update sta (error %d)\n",
5909 DEVNAME(sc), err);
5910 return err;
5911 }
5912
5913 switch (status & IWX_ADD_STA_STATUS_MASK) {
5914 case IWX_ADD_STA_SUCCESS:
5915 break;
5916 default:
5917 err = EIO;
5918 printf("%s: Couldn't %s draining for station\n",
5919 DEVNAME(sc), drain ? "enable" : "disable");
5920 break;
5921 }
5922
5923 return err;
5924 }
5925
5926 static int
5927 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5928 {
5929 int err;
5930
5931 IWX_ASSERT_LOCKED(sc);
5932
5933 sc->sc_flags |= IWX_FLAG_TXFLUSH;
5934
5935 err = iwx_drain_sta(sc, in, 1);
5936 if (err)
5937 goto done;
5938
5939 err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5940 if (err) {
5941 printf("%s: could not flush Tx path (error %d)\n",
5942 DEVNAME(sc), err);
5943 goto done;
5944 }
5945
5946 /*
5947 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the
5948 * fc drive rand has has been replaced in OpenBSD.
5949 */
5950
5951 err = iwx_drain_sta(sc, in, 0);
5952 done:
5953 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5954 return err;
5955 }
5956
5957 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
5958
5959 static int
5960 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5961 struct iwx_beacon_filter_cmd *cmd)
5962 {
5963 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5964 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5965 }
5966
5967 static int
5968 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5969 {
5970 struct iwx_beacon_filter_cmd cmd = {
5971 IWX_BF_CMD_CONFIG_DEFAULTS,
5972 .bf_enable_beacon_filter = htole32(1),
5973 .ba_enable_beacon_abort = htole32(enable),
5974 };
5975
5976 if (!sc->sc_bf.bf_enabled)
5977 return 0;
5978
5979 sc->sc_bf.ba_enabled = enable;
5980 return iwx_beacon_filter_send_cmd(sc, &cmd);
5981 }
5982
5983 static void
5984 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5985 struct iwx_mac_power_cmd *cmd)
5986 {
5987 struct ieee80211com *ic = &sc->sc_ic;
5988 struct ieee80211_node *ni = &in->in_ni;
5989 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5990 int dtim_period, dtim_msec, keep_alive;
5991
5992 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5993 in->in_color));
5994 if (vap->iv_dtim_period)
5995 dtim_period = vap->iv_dtim_period;
5996 else
5997 dtim_period = 1;
5998
5999 /*
6000 * Regardless of power management state the driver must set
6001 * keep alive period. FW will use it for sending keep alive NDPs
6002 * immediately after association. Check that keep alive period
6003 * is at least 3 * DTIM.
6004 */
6005 dtim_msec = dtim_period * ni->ni_intval;
6006 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6007 keep_alive = roundup(keep_alive, 1000) / 1000;
6008 cmd->keep_alive_seconds = htole16(keep_alive);
6009
6010 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6011 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6012 }
6013
6014 static int
6015 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6016 {
6017 int err;
6018 int ba_enable;
6019 struct iwx_mac_power_cmd cmd;
6020
6021 memset(&cmd, 0, sizeof(cmd));
6022
6023 iwx_power_build_cmd(sc, in, &cmd);
6024
6025 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6026 sizeof(cmd), &cmd);
6027 if (err != 0)
6028 return err;
6029
6030 ba_enable = !!(cmd.flags &
6031 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6032 return iwx_update_beacon_abort(sc, in, ba_enable);
6033 }
6034
6035 static int
6036 iwx_power_update_device(struct iwx_softc *sc)
6037 {
6038 struct iwx_device_power_cmd cmd = { };
6039 struct ieee80211com *ic = &sc->sc_ic;
6040
6041 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6042 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6043
6044 return iwx_send_cmd_pdu(sc,
6045 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6046 }
#if 0
/*
 * Enable beacon filtering, preserving the current beacon abort
 * setting.  Compiled out; currently unused by this driver.
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
6065 static int
6066 iwx_disable_beacon_filter(struct iwx_softc *sc)
6067 {
6068 struct iwx_beacon_filter_cmd cmd;
6069 int err;
6070
6071 memset(&cmd, 0, sizeof(cmd));
6072
6073 err = iwx_beacon_filter_send_cmd(sc, &cmd);
6074 if (err == 0)
6075 sc->sc_bf.bf_enabled = 0;
6076
6077 return err;
6078 }
6079
/*
 * Add or update the firmware station entry for our AP (or the
 * monitor-mode pseudo station).  'update' selects between the initial
 * add and a later modify of an existing entry.  Returns 0 or an errno
 * value.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* Monitor mode uses a dedicated general-purpose station slot. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The peer MAC address is only set on the initial add. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		/*
		 * Enable 2-stream MIMO when our hardware allows it and the
		 * peer is VHT-capable or advertises an HT MCS above 7.
		 */
		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Derive the A-MPDU size exponent from the HT IE. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Map the peer's minimum MPDU start spacing to fw flags. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6200
6201 static int
6202 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6203 {
6204 struct ieee80211com *ic = &sc->sc_ic;
6205 struct iwx_rm_sta_cmd rm_sta_cmd;
6206 int err;
6207
6208 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6209 panic("sta already removed");
6210
6211 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6212 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6213 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6214 else
6215 rm_sta_cmd.sta_id = IWX_STATION_ID;
6216
6217 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6218 &rm_sta_cmd);
6219
6220 return err;
6221 }
6222
/*
 * Fully remove a station: flush its Tx path, disable its Tx queues
 * where the firmware API requires it, remove the firmware station
 * entry, and reset all associated block-ack/aggregation state.
 * Returns 0 or an errno value.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Disable every aggregation queue that is still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all block-ack and aggregation bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6295
6296 static uint8_t
6297 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6298 struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6299 int n_ssids, uint32_t channel_cfg_flags)
6300 {
6301 struct ieee80211com *ic = &sc->sc_ic;
6302 struct ieee80211_scan_state *ss = ic->ic_scan;
6303 struct ieee80211_channel *c;
6304 uint8_t nchan;
6305 int j;
6306
6307 for (nchan = j = 0;
6308 j < ss->ss_last &&
6309 nchan < sc->sc_capa_n_scan_channels;
6310 j++) {
6311 uint8_t channel_num;
6312
6313 c = ss->ss_chans[j];
6314 channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6315 if (isset(sc->sc_ucode_api,
6316 IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6317 chan->v2.channel_num = channel_num;
6318 if (IEEE80211_IS_CHAN_2GHZ(c))
6319 chan->v2.band = IWX_PHY_BAND_24;
6320 else
6321 chan->v2.band = IWX_PHY_BAND_5;
6322 chan->v2.iter_count = 1;
6323 chan->v2.iter_interval = 0;
6324 } else {
6325 chan->v1.channel_num = channel_num;
6326 chan->v1.iter_count = 1;
6327 chan->v1.iter_interval = htole16(0);
6328 }
6329 chan->flags |= htole32(channel_cfg_flags);
6330 chan++;
6331 nchan++;
6332 }
6333
6334 return nchan;
6335 }
6336
6337 static int
6338 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6339 {
6340 struct ieee80211com *ic = &sc->sc_ic;
6341 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6342 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6343 struct ieee80211_rateset *rs;
6344 size_t remain = sizeof(preq->buf);
6345 uint8_t *frm, *pos;
6346
6347 memset(preq, 0, sizeof(*preq));
6348
6349 if (remain < sizeof(*wh) + 2)
6350 return ENOBUFS;
6351
6352 /*
6353 * Build a probe request frame. Most of the following code is a
6354 * copy & paste of what is done in net80211.
6355 */
6356 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6357 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6358 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6359 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6360 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6361 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6362 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6363 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6364
6365 frm = (uint8_t *)(wh + 1);
6366 *frm++ = IEEE80211_ELEMID_SSID;
6367 *frm++ = 0;
6368 /* hardware inserts SSID */
6369
6370 /* Tell the firmware where the MAC header is. */
6371 preq->mac_header.offset = 0;
6372 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6373 remain -= frm - (uint8_t *)wh;
6374
6375 /* Fill in 2GHz IEs and tell firmware where they are. */
6376 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6377 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6378 if (remain < 4 + rs->rs_nrates)
6379 return ENOBUFS;
6380 } else if (remain < 2 + rs->rs_nrates)
6381 return ENOBUFS;
6382 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6383 pos = frm;
6384 frm = ieee80211_add_rates(frm, rs);
6385 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6386 frm = ieee80211_add_xrates(frm, rs);
6387 remain -= frm - pos;
6388
6389 if (isset(sc->sc_enabled_capa,
6390 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6391 if (remain < 3)
6392 return ENOBUFS;
6393 *frm++ = IEEE80211_ELEMID_DSPARMS;
6394 *frm++ = 1;
6395 *frm++ = 0;
6396 remain -= 3;
6397 }
6398 preq->band_data[0].len = htole16(frm - pos);
6399
6400 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6401 /* Fill in 5GHz IEs. */
6402 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6403 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6404 if (remain < 4 + rs->rs_nrates)
6405 return ENOBUFS;
6406 } else if (remain < 2 + rs->rs_nrates)
6407 return ENOBUFS;
6408 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6409 pos = frm;
6410 frm = ieee80211_add_rates(frm, rs);
6411 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6412 frm = ieee80211_add_xrates(frm, rs);
6413 preq->band_data[1].len = htole16(frm - pos);
6414 remain -= frm - pos;
6415 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6416 if (remain < 14)
6417 return ENOBUFS;
6418 frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6419 remain -= frm - pos;
6420 preq->band_data[1].len = htole16(frm - pos);
6421 }
6422 }
6423
6424 /* Send 11n IEs on both 2GHz and 5GHz bands. */
6425 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6426 pos = frm;
6427 if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6428 if (remain < 28)
6429 return ENOBUFS;
6430 frm = ieee80211_add_htcap(frm, vap->iv_bss);
6431 /* XXX add WME info? */
6432 remain -= frm - pos;
6433 }
6434
6435 preq->common_data.len = htole16(frm - pos);
6436
6437 return 0;
6438 }
6439
6440 static int
6441 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6442 {
6443 struct iwx_scan_config scan_cfg;
6444 struct iwx_host_cmd hcmd = {
6445 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6446 .len[0] = sizeof(scan_cfg),
6447 .data[0] = &scan_cfg,
6448 .flags = 0,
6449 };
6450 int cmdver;
6451
6452 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6453 printf("%s: firmware does not support reduced scan config\n",
6454 DEVNAME(sc));
6455 return ENOTSUP;
6456 }
6457
6458 memset(&scan_cfg, 0, sizeof(scan_cfg));
6459
6460 /*
6461 * SCAN_CFG version >= 5 implies that the broadcast
6462 * STA ID field is deprecated.
6463 */
6464 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6465 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6466 scan_cfg.bcast_sta_id = 0xff;
6467
6468 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6469 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6470
6471 return iwx_send_cmd(sc, &hcmd);
6472 }
6473
6474 static uint16_t
6475 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6476 {
6477 struct ieee80211com *ic = &sc->sc_ic;
6478 struct ieee80211_scan_state *ss = ic->ic_scan;
6479 uint16_t flags = 0;
6480
6481 if (ss->ss_nssid == 0) {
6482 DPRINTF(("%s: Passive scan started\n", __func__));
6483 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6484 }
6485
6486 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6487 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6488 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6489
6490 return flags;
6491 }
6492
6493 #define IWX_SCAN_DWELL_ACTIVE 10
6494 #define IWX_SCAN_DWELL_PASSIVE 110
6495
6496 /* adaptive dwell max budget time [TU] for full scan */
6497 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6498 /* adaptive dwell max budget time [TU] for directed scan */
6499 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6500 /* adaptive dwell default high band APs number */
6501 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6502 /* adaptive dwell default low band APs number */
6503 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6504 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6505 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6506 /* adaptive dwell number of APs override for p2p friendly GO channels */
6507 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6508 /* adaptive dwell number of APs override for social channels */
6509 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6510
6511 static void
6512 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6513 struct iwx_scan_general_params_v10 *general_params, int bgscan)
6514 {
6515 uint32_t suspend_time, max_out_time;
6516 uint8_t active_dwell, passive_dwell;
6517
6518 active_dwell = IWX_SCAN_DWELL_ACTIVE;
6519 passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6520
6521 general_params->adwell_default_social_chn =
6522 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6523 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6524 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6525
6526 if (bgscan)
6527 general_params->adwell_max_budget =
6528 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6529 else
6530 general_params->adwell_max_budget =
6531 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6532
6533 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6534 if (bgscan) {
6535 max_out_time = htole32(120);
6536 suspend_time = htole32(120);
6537 } else {
6538 max_out_time = htole32(0);
6539 suspend_time = htole32(0);
6540 }
6541 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6542 htole32(max_out_time);
6543 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6544 htole32(suspend_time);
6545 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6546 htole32(max_out_time);
6547 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6548 htole32(suspend_time);
6549
6550 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6551 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6552 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6553 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6554 }
6555
6556 static void
6557 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6558 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6559 {
6560 iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6561
6562 gp->flags = htole16(gen_flags);
6563
6564 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6565 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6566 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6567 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6568
6569 gp->scan_start_mac_id = 0;
6570 }
6571
6572 static void
6573 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6574 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6575 int n_ssid)
6576 {
6577 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6578
6579 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6580 nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6581
6582 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6583 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6584 }
6585
/*
 * Build and send a v14 UMAC scan request.  Background scans are sent
 * asynchronously; foreground scans wait for command completion.
 * Returns 0 or an errno value.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	/* The command is large; use preallocated softc storage. */
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* One-shot schedule: a single iteration, no repeat interval. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/* Copy the requested SSIDs for a directed (active) scan. */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6648
6649 static void
6650 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6651 {
6652 char alpha2[3];
6653
6654 snprintf(alpha2, sizeof(alpha2), "%c%c",
6655 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6656
6657 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6658 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6659
6660 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6661 }
6662
6663 uint8_t
6664 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6665 {
6666 int i;
6667 uint8_t rval;
6668
6669 for (i = 0; i < rs->rs_nrates; i++) {
6670 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6671 if (rval == iwx_rates[ridx].rate)
6672 return rs->rs_rates[i];
6673 }
6674
6675 return 0;
6676 }
6677
6678 static int
6679 iwx_rval2ridx(int rval)
6680 {
6681 int ridx;
6682
6683 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6684 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6685 continue;
6686 if (rval == iwx_rates[ridx].rate)
6687 break;
6688 }
6689
6690 return ridx;
6691 }
6692
/*
 * Compute the CCK and OFDM ACK/control-response rate bitmaps for the
 * firmware MAC context, derived from the node's basic rate set plus
 * the mandatory lower rates required by 802.11-2007 9.6.
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only apply on 2GHz (or when the channel is unknown). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6778
6779 static void
6780 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6781 struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6782 {
6783 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6784 struct ieee80211com *ic = &sc->sc_ic;
6785 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6786 struct ieee80211_node *ni = vap->iv_bss;
6787 int cck_ack_rates, ofdm_ack_rates;
6788
6789 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6790 in->in_color));
6791 cmd->action = htole32(action);
6792
6793 if (action == IWX_FW_CTXT_ACTION_REMOVE)
6794 return;
6795
6796 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6797 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6798 else if (ic->ic_opmode == IEEE80211_M_STA)
6799 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6800 else
6801 panic("unsupported operating mode %d", ic->ic_opmode);
6802 cmd->tsf_id = htole32(IWX_TSF_ID_A);
6803
6804 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6805 DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6806 ether_sprintf(cmd->node_addr)));
6807 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6808 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6809 return;
6810 }
6811
6812 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6813 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6814 ether_sprintf(cmd->bssid_addr)));
6815 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6816 cmd->cck_rates = htole32(cck_ack_rates);
6817 cmd->ofdm_rates = htole32(ofdm_ack_rates);
6818
6819 cmd->cck_short_preamble
6820 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6821 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6822 cmd->short_slot
6823 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6824 ? IWX_MAC_FLG_SHORT_SLOT : 0);
6825
6826 struct chanAccParams chp;
6827 ieee80211_wme_vap_getparams(vap, &chp);
6828
6829 for (int i = 0; i < WME_NUM_AC; i++) {
6830 int txf = iwx_ac_to_tx_fifo[i];
6831 cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6832 cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6833 cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6834 cmd->ac[txf].fifos_mask = (1 << txf);
6835 cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6836
6837 cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6838 }
6839
6840 if (ni->ni_flags & IEEE80211_NODE_QOS) {
6841 DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6842 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6843 }
6844
6845 if (ni->ni_flags & IEEE80211_NODE_HT) {
6846 switch (vap->iv_curhtprotmode) {
6847 case IEEE80211_HTINFO_OPMODE_PURE:
6848 break;
6849 case IEEE80211_HTINFO_OPMODE_PROTOPT:
6850 case IEEE80211_HTINFO_OPMODE_MIXED:
6851 cmd->protection_flags |=
6852 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6853 IWX_MAC_PROT_FLG_FAT_PROT);
6854 break;
6855 case IEEE80211_HTINFO_OPMODE_HT20PR:
6856 if (in->in_phyctxt &&
6857 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6858 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6859 cmd->protection_flags |=
6860 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6861 IWX_MAC_PROT_FLG_FAT_PROT);
6862 }
6863 break;
6864 default:
6865 break;
6866 }
6867 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6868 DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6869 }
6870
6871 if (ic->ic_flags & IEEE80211_F_USEPROT)
6872 cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6873 cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6874 #undef IWX_EXP2
6875 }
6876
6877 static void
6878 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6879 struct iwx_mac_data_sta *sta, int assoc)
6880 {
6881 struct ieee80211_node *ni = &in->in_ni;
6882 struct ieee80211com *ic = &sc->sc_ic;
6883 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6884 uint32_t dtim_off;
6885 uint64_t tsf;
6886 int dtim_period;
6887
6888 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6889 tsf = le64toh(ni->ni_tstamp.tsf);
6890 dtim_period = vap->iv_dtim_period;
6891
6892 sta->is_assoc = htole32(assoc);
6893
6894 if (assoc) {
6895 sta->dtim_time = htole32(tsf + dtim_off);
6896 sta->dtim_tsf = htole64(tsf + dtim_off);
6897 // XXX: unset in iwm
6898 sta->assoc_beacon_arrive_time = 0;
6899 }
6900 sta->bi = htole32(ni->ni_intval);
6901 sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6902 sta->data_policy = htole32(0);
6903 sta->listen_interval = htole32(10);
6904 sta->assoc_id = htole32(ni->ni_associd);
6905 }
6906
6907 static int
6908 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6909 int assoc)
6910 {
6911 struct ieee80211com *ic = &sc->sc_ic;
6912 struct ieee80211_node *ni = &in->in_ni;
6913 struct iwx_mac_ctx_cmd cmd;
6914 int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6915
6916 if (action == IWX_FW_CTXT_ACTION_ADD && active)
6917 panic("MAC already added");
6918 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6919 panic("MAC already removed");
6920
6921 memset(&cmd, 0, sizeof(cmd));
6922
6923 iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6924
6925 if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6926 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6927 sizeof(cmd), &cmd);
6928 }
6929
6930 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6931 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6932 IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6933 IWX_MAC_FILTER_ACCEPT_GRP |
6934 IWX_MAC_FILTER_IN_BEACON |
6935 IWX_MAC_FILTER_IN_PROBE_REQUEST |
6936 IWX_MAC_FILTER_IN_CRC32);
6937 // XXX: dtim period is in vap
6938 } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
6939 /*
6940 * Allow beacons to pass through as long as we are not
6941 * associated or we do not have dtim period information.
6942 */
6943 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6944 }
6945 iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6946 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6947 }
6948
6949 static int
6950 iwx_clear_statistics(struct iwx_softc *sc)
6951 {
6952 struct iwx_statistics_cmd scmd = {
6953 .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6954 };
6955 struct iwx_host_cmd cmd = {
6956 .id = IWX_STATISTICS_CMD,
6957 .len[0] = sizeof(scmd),
6958 .data[0] = &scmd,
6959 .flags = IWX_CMD_WANT_RESP,
6960 .resp_pkt_len = sizeof(struct iwx_notif_statistics),
6961 };
6962 int err;
6963
6964 err = iwx_send_cmd(sc, &cmd);
6965 if (err)
6966 return err;
6967
6968 iwx_free_resp(sc, &cmd);
6969 return 0;
6970 }
6971
/* Start a foreground UMAC scan. */
static int
iwx_scan(struct iwx_softc *sc)
{
	int err = iwx_umac_scan_v14(sc, 0);

	if (err != 0)
		printf("%s: could not initiate scan\n", DEVNAME(sc));
	return err;
}
6984
6985 static int
6986 iwx_bgscan(struct ieee80211com *ic)
6987 {
6988 struct iwx_softc *sc = ic->ic_softc;
6989 int err;
6990
6991 err = iwx_umac_scan_v14(sc, 1);
6992 if (err) {
6993 printf("%s: could not initiate scan\n", DEVNAME(sc));
6994 return err;
6995 }
6996 return 0;
6997 }
6998
6999 static int
7000 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7001 {
7002 int err;
7003
7004 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7005
7006 /*
7007 * Non-QoS frames use the "MGMT" TID and queue.
7008 * Other TIDs and data queues are reserved for QoS data frames.
7009 */
7010 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7011 IWX_MGMT_TID, IWX_TX_RING_COUNT);
7012 if (err) {
7013 printf("%s: could not enable Tx queue %d (error %d)\n",
7014 DEVNAME(sc), sc->first_data_qid, err);
7015 return err;
7016 }
7017
7018 return 0;
7019 }
7020
7021 static int
7022 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7023 {
7024 int err, cmd_ver;
7025
7026 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7027 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7028 IWX_SCD_QUEUE_CONFIG_CMD);
7029 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7030 return 0;
7031
7032 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7033
7034 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7035 IWX_MGMT_TID);
7036 if (err) {
7037 printf("%s: could not disable Tx queue %d (error %d)\n",
7038 DEVNAME(sc), sc->first_data_qid, err);
7039 return err;
7040 }
7041
7042 return 0;
7043 }
7044
7045 static int
7046 iwx_rs_rval2idx(uint8_t rval)
7047 {
7048 /* Firmware expects indices which match our 11g rate set. */
7049 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7050 int i;
7051
7052 for (i = 0; i < rs->rs_nrates; i++) {
7053 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7054 return i;
7055 }
7056
7057 return -1;
7058 }
7059
7060 static uint16_t
7061 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7062 {
7063 uint16_t htrates = 0;
7064 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7065 int i;
7066
7067 if (rsidx == IEEE80211_HT_RATESET_SISO) {
7068 for (i = 0; i < htrs->rs_nrates; i++) {
7069 if (htrs->rs_rates[i] <= 7)
7070 htrates |= (1 << htrs->rs_rates[i]);
7071 }
7072 } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7073 for (i = 0; i < htrs->rs_nrates; i++) {
7074 if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7075 htrates |= (1 << (htrs->rs_rates[i] - 8));
7076 }
7077 } else
7078 panic(("iwx_rs_ht_rates"));
7079
7080 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7081 "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7082
7083 return htrates;
7084 }
7085
/*
 * Build a bitmask of supported VHT MCS (bits 0..max_mcs set) for the
 * given spatial stream count (1-based).  Returns 0 when the peer does
 * not support that number of streams.
 */
uint16_t
iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
{
	uint16_t rx_mcs;
	int max_mcs = -1;
/* Extract the 2-bit "max MCS" field for spatial stream 'n' (1-based). */
#define IEEE80211_VHT_MCS_FOR_SS_MASK(n)	(0x3 << (2*((n)-1)))
#define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)	(2*((n)-1))
	/*
	 * NOTE(review): this reads the peer's *Tx* MCS map
	 * (ni_vht_mcsinfo.tx_mcs_map) — confirm this is the intended map
	 * for selecting the rates we transmit to the peer.
	 */
	rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
	    IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);

	switch (rx_mcs) {
	case IEEE80211_VHT_MCS_NOT_SUPPORTED:
		/* max_mcs stays -1, so the expression below yields 0. */
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_7:
		max_mcs = 7;
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_8:
		max_mcs = 8;
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_9:
		/* Disable VHT MCS 9 for 20MHz-only stations. */
		if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
			max_mcs = 8;
		else
			max_mcs = 9;
		break;
	default:
		/* Should not happen; Values above cover the possible range. */
		panic("invalid VHT Rx MCS value %u", rx_mcs);
	}

	/* Bits 0..max_mcs; 0 if this stream count is unsupported. */
	return ((1 << (max_mcs + 1)) - 1);
}
7120
/*
 * Configure firmware rate scaling using version 3 of the TLC config
 * command.  This path is currently disabled via "#if 1" and panics
 * instead: the v3 body below has not been validated on FreeBSD, so we
 * refuse to send an untested command to the firmware.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the legacy rate set into firmware rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the highest PHY mode the peer supports: VHT > HT > legacy. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Derive maximum channel width from the current PHY context. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU length: 3895 bytes for VHT, 3839 for HT. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7192
/*
 * Configure firmware rate scaling (TLC) for our station using version 4
 * of the TLC config command: legacy rate bitmap, HT/VHT MCS maps,
 * channel width, chain mask, maximum MPDU length and SGI support.
 * Returns EINVAL if a negotiated legacy rate has no firmware index.
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the negotiated legacy rates into firmware indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the highest PHY mode the peer supports: VHT > HT > legacy. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	/* OpenBSD derives the width from the PHY context instead. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Maximum channel width is taken from the BSS channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU length: 3895 bytes for VHT, 3839 for HT. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7291
7292 static int
7293 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7294 {
7295 int cmd_ver;
7296
7297 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7298 IWX_TLC_MNG_CONFIG_CMD);
7299 if (cmd_ver == 4)
7300 return iwx_rs_init_v4(sc, in);
7301 else
7302 return iwx_rs_init_v3(sc, in);
7303 }
7304
/*
 * Handle a TLC_MNG_UPDATE_NOTIF from the firmware: decode the Tx rate
 * selected by the firmware's rate-scaling algorithm and record it on
 * the BSS node so net80211 can report the current Tx rate.
 */
static void
iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = (void *)vap->iv_bss;

	struct ieee80211_rateset *rs = &ni->ni_rates;
	uint32_t rate_n_flags;
	uint8_t plcp, rval;
	int i, cmd_ver, rate_n_flags_ver2 = 0;

	/* Ignore notifications for other stations or without a rate. */
	if (notif->sta_id != IWX_STATION_ID ||
	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
		return;

	rate_n_flags = le32toh(notif->rate);

	if (sc->sc_debug & IWX_DEBUG_TXRATE)
		print_ratenflags(__func__, __LINE__,
		    rate_n_flags, sc->sc_rate_n_flags_version);

	/* Notification version >= 3 uses the v2 rate_n_flags layout. */
	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_TLC_MNG_UPDATE_NOTIF);
	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
		rate_n_flags_ver2 = 1;

	/* HT MCS rates: record the MCS index and return. */
	if (rate_n_flags_ver2) {
		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
		if (mod_type == IWX_RATE_MCS_HT_MSK) {

			ieee80211_node_set_txrate_dot11rate(ni,
			    IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
			    IEEE80211_RATE_MCS);
			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
			    rate_n_flags);
			return;
		}
	} else {
		if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
			ieee80211_node_set_txrate_dot11rate(ni,
			    rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));

			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS idx: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
			return;
		}
	}

	/* Legacy rates: translate the firmware rate to a 500kbit/s value. */
	if (rate_n_flags_ver2) {
		/* NOTE(review): this 'rs' shadows the function-scope 'rs'. */
		const struct ieee80211_rateset *rs;
		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
			rs = &ieee80211_std_rateset_11a;
		else
			rs = &ieee80211_std_rateset_11b;
		if (ridx < rs->rs_nrates)
			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
		else
			rval = 0;
	} else {
		/* v1 layout carries a PLCP code; look it up in iwx_rates. */
		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);

		rval = 0;
		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
			if (iwx_rates[i].plcp == plcp) {
				rval = iwx_rates[i].rate;
				break;
			}
		}
	}

	/* Find the rate in the node's negotiated rate set and record it. */
	if (rval) {
		uint8_t rv;
		for (i = 0; i < rs->rs_nrates; i++) {
			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
			if (rv == rval) {
				ieee80211_node_set_txrate_dot11rate(ni, i);
				break;
			}
		}
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
		    "%s:%d new rate %d\n", __func__, __LINE__,
		    ieee80211_node_get_txrate_dot11rate(ni));
	}
}
7397
7398 static int
7399 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7400 uint8_t chains_static, uint8_t chains_dynamic)
7401 {
7402 struct iwx_rlc_config_cmd cmd;
7403 uint32_t cmd_id;
7404 uint8_t active_cnt, idle_cnt;
7405
7406 memset(&cmd, 0, sizeof(cmd));
7407
7408 idle_cnt = chains_static;
7409 active_cnt = chains_dynamic;
7410
7411 cmd.phy_id = htole32(phyctxt->id);
7412 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7413 IWX_PHY_RX_CHAIN_VALID_POS);
7414 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7415 cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7416 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7417
7418 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7419 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7420 }
7421
/*
 * Move a PHY context to a new channel/width configuration.  When the
 * firmware has the BINDING_CDB capability and the new channel is on a
 * different band, the context cannot be modified in place: it must be
 * removed and re-added.  Otherwise a MODIFY suffices.  Sends an RLC
 * config afterwards when the firmware uses RLC_CONFIG_CMD version 2.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	/* Guard against being handed the "any channel" placeholder. */
	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		/* Band changed: remove the context, then add it anew. */
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		/* Same band (or no CDB): modify the context in place. */
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Record the new secondary-channel offset and VHT width. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7486
/*
 * Bring up firmware state for authentication: configure the PHY
 * context, add the MAC context, binding and station, enable the
 * management queue, and protect the session with a time event so the
 * firmware does not wander off-channel during association.  On failure
 * the already-established firmware state is unwound via the goto
 * labels, but only if no firmware reset happened in the meantime
 * (generation check).
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * NOTE(review): a reference is taken on the BSS node here but no
	 * matching release is visible on the early-return error paths
	 * below — confirm who owns and releases this reference.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	/* Point PHY context 0 at the BSS (or monitored) channel. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	/* Firmware objects must be created in order: MAC, binding, STA. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
 	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode needs only the injection queue; we are done. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

	/* Unwind in reverse order, but only if firmware was not reset. */
rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7591
/*
 * Tear down firmware state established by iwx_auth(): cancel session
 * protection, then remove station, binding and MAC context in reverse
 * order of creation, and finally park the unused PHY context on a
 * default channel.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7643
/*
 * Complete the transition to RUN state after association: widen the
 * PHY context for HT/VHT if negotiated, update the station and MAC
 * contexts with the new association state, and configure smart-FIFO,
 * multicast, power management and firmware rate scaling.
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Use two chains when MIMO is enabled. */
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode has no peer; no rate scaling to configure. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7740
/*
 * Leave RUN state: flush pending Tx for the station, stop all Rx BA
 * sessions, restore smart-FIFO and beacon-filter defaults, and mark
 * the station as disassociated in the MAC context.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7796
7797 static struct ieee80211_node *
7798 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7799 {
7800 return malloc(sizeof (struct iwx_node), M_80211_NODE,
7801 M_NOWAIT | M_ZERO);
7802 }
7803
7804 #if 0
/*
 * Hardware key-install hook (currently compiled out via #if 0).
 * Queues a CCMP key for deferred installation by iwx_setkey_task();
 * all other ciphers fall back to net80211 software crypto.
 * NOTE(review): uses OpenBSD-era APIs (k_cipher, systq, iwx_add_task).
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	/* Ring of pending keys is full. */
	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	/* NOTE(review): EBUSY appears to signal asynchronous completion
	 * to the caller (OpenBSD convention) — confirm. */
	return EBUSY;
}
7834
/*
 * Install a CCMP key for station 'sta_id' in firmware (currently
 * compiled out via #if 0).  On failure the association is abandoned
 * with a deauth, since hardware crypto cannot proceed.  Once both the
 * pairwise and group keys are installed, the 802.1X port is marked
 * valid and the link is brought up.
 */
int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys use firmware key slot 1, the pairwise key slot 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		/* Hardware crypto failed: drop this association. */
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* Both keys present: open the 802.1X port. */
	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}
7899
/*
 * Deferred key-install task (currently compiled out via #if 0).
 * Drains the ring of keys queued by iwx_set_key(), installing each
 * via iwx_add_sta_key().  Stops early on the first error or when the
 * driver is shutting down.
 */
void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		/* Clear the slot before advancing the tail. */
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
7923
/*
 * Remove a CCMP key from firmware (currently compiled out via #if 0);
 * other ciphers are handed back to net80211 software crypto.  Does
 * nothing unless the firmware station is active.  The removal command
 * is sent asynchronously and its result is not checked.
 */
void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	/* Same slot convention as installation: group = 1, pairwise = 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
7955 #endif
7956
/*
 * Perform the firmware-facing work for a net80211 state transition,
 * with the driver lock held.  Downward (or same-state) transitions
 * first unwind the current state via the fall-through switch on
 * 'ostate'; upward transitions then issue the auth/run commands for
 * the target state.  net80211 state chaining happens in the caller,
 * iwx_newstate().
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	/* Unwind the old state when not moving strictly upward. */
	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			/* Deauth only when dropping to AUTH or below. */
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/* Program the firmware for the new state. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8022
/*
 * net80211 state-change hook.  Filters redundant same-state
 * transitions (except SCAN->SCAN, which restarts scanning, and
 * AUTH->AUTH, used for band-steering), runs the driver-side
 * transition with the com lock dropped, and chains to net80211's
 * saved newstate handler on success.
 */
static int
iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_state ostate = vap->iv_state;
	int err;

	/*
	 * Prevent attempts to transition towards the same state, unless
	 * we are scanning in which case a SCAN -> SCAN transition
	 * triggers another scan iteration. And AUTH -> AUTH is needed
	 * to support band-steering.
	 */
	if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
	    nstate != IEEE80211_S_AUTH)
		return 0;
	/* Driver-side work runs without the net80211 lock held. */
	IEEE80211_UNLOCK(ic);
	err = iwx_newstate_sub(vap, nstate);
	IEEE80211_LOCK(ic);
	if (err == 0)
		err = ivp->iv_newstate(vap, nstate, arg);

	return (err);
}
8048
8049 static void
8050 iwx_endscan(struct iwx_softc *sc)
8051 {
8052 struct ieee80211com *ic = &sc->sc_ic;
8053 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8054
8055 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8056 return;
8057
8058 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8059
8060 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8061 wakeup(&vap->iv_state); /* wake up iwx_newstate */
8062 }
8063
8064 /*
8065 * Aging and idle timeouts for the different possible scenarios
8066 * in default configuration
8067 */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	/* Each pair is { aging timeout, idle timeout }, per the macro names. */
	{	/* single unicast */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block-ack */
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* TX_RE (Tx retry — TODO confirm meaning) */
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
8091
8092 /*
8093 * Aging and idle timeouts for the different possible scenarios
8094 * in single BSS MAC configuration.
8095 */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	/* Each pair is { aging timeout, idle timeout }, per the macro names. */
	{	/* single unicast */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{	/* block-ack */
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{	/* TX_RE (Tx retry — TODO confirm meaning) */
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};
8119
8120 static void
8121 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8122 struct ieee80211_node *ni)
8123 {
8124 int i, j, watermark;
8125
8126 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8127
8128 /*
8129 * If we are in association flow - check antenna configuration
8130 * capabilities of the AP station, and choose the watermark accordingly.
8131 */
8132 if (ni) {
8133 if (ni->ni_flags & IEEE80211_NODE_HT) {
8134 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8135 int hasmimo = 0;
8136 for (i = 0; i < htrs->rs_nrates; i++) {
8137 if (htrs->rs_rates[i] > 7) {
8138 hasmimo = 1;
8139 break;
8140 }
8141 }
8142 if (hasmimo)
8143 watermark = IWX_SF_W_MARK_MIMO2;
8144 else
8145 watermark = IWX_SF_W_MARK_SISO;
8146 } else {
8147 watermark = IWX_SF_W_MARK_LEGACY;
8148 }
8149 /* default watermark value for unassociated mode. */
8150 } else {
8151 watermark = IWX_SF_W_MARK_MIMO2;
8152 }
8153 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8154
8155 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8156 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8157 sf_cmd->long_delay_timeouts[i][j] =
8158 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8159 }
8160 }
8161
8162 if (ni) {
8163 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8164 sizeof(iwx_sf_full_timeout));
8165 } else {
8166 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8167 sizeof(iwx_sf_full_timeout_def));
8168 }
8169
8170 }
8171
8172 static int
8173 iwx_sf_config(struct iwx_softc *sc, int new_state)
8174 {
8175 struct ieee80211com *ic = &sc->sc_ic;
8176 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8177 struct ieee80211_node *ni = vap->iv_bss;
8178 struct iwx_sf_cfg_cmd sf_cmd = {
8179 .state = htole32(new_state),
8180 };
8181 int err = 0;
8182
8183 switch (new_state) {
8184 case IWX_SF_UNINIT:
8185 case IWX_SF_INIT_OFF:
8186 iwx_fill_sf_command(sc, &sf_cmd, NULL);
8187 break;
8188 case IWX_SF_FULL_ON:
8189 iwx_fill_sf_command(sc, &sf_cmd, ni);
8190 break;
8191 default:
8192 return EINVAL;
8193 }
8194
8195 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8196 sizeof(sf_cmd), &sf_cmd);
8197 return err;
8198 }
8199
8200 static int
8201 iwx_send_bt_init_conf(struct iwx_softc *sc)
8202 {
8203 struct iwx_bt_coex_cmd bt_cmd;
8204
8205 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8206
8207 bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8208 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8209 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8210
8211
8212 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8213 &bt_cmd);
8214 }
8215
/*
 * Send the SoC configuration command, advertising whether the device
 * is discrete or integrated, the crystal latency, and (for integrated
 * parts) LTR delay and low-latency-crystal flags.
 */
static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask. Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		/* Pass through any configured LTR apply-delay. */
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/* The low-latency flag requires UMAC scan command v2+. */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
8254
8255 static int
8256 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8257 {
8258 struct iwx_mcc_update_cmd mcc_cmd;
8259 struct iwx_host_cmd hcmd = {
8260 .id = IWX_MCC_UPDATE_CMD,
8261 .flags = IWX_CMD_WANT_RESP,
8262 .data = { &mcc_cmd },
8263 };
8264 struct iwx_rx_packet *pkt;
8265 struct iwx_mcc_update_resp *resp;
8266 size_t resp_len;
8267 int err;
8268
8269 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8270 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8271 if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8272 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8273 mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8274 else
8275 mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8276
8277 hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8278 hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8279
8280 err = iwx_send_cmd(sc, &hcmd);
8281 if (err)
8282 return err;
8283
8284 pkt = hcmd.resp_pkt;
8285 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8286 err = EIO;
8287 goto out;
8288 }
8289
8290 resp_len = iwx_rx_packet_payload_len(pkt);
8291 if (resp_len < sizeof(*resp)) {
8292 err = EIO;
8293 goto out;
8294 }
8295
8296 resp = (void *)pkt->data;
8297 if (resp_len != sizeof(*resp) +
8298 resp->n_channels * sizeof(resp->channels[0])) {
8299 err = EIO;
8300 goto out;
8301 }
8302
8303 DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
8304 resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8305
8306 out:
8307 iwx_free_resp(sc, &hcmd);
8308
8309 return err;
8310 }
8311
8312 static int
8313 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8314 {
8315 struct iwx_temp_report_ths_cmd cmd;
8316 int err;
8317
8318 /*
8319 * In order to give responsibility for critical-temperature-kill
8320 * and TX backoff to FW we need to send an empty temperature
8321 * reporting command at init time.
8322 */
8323 memset(&cmd, 0, sizeof(cmd));
8324
8325 err = iwx_send_cmd_pdu(sc,
8326 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8327 0, sizeof(cmd), &cmd);
8328 if (err)
8329 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8330 DEVNAME(sc), err);
8331
8332 return err;
8333 }
8334
8335 static int
8336 iwx_init_hw(struct iwx_softc *sc)
8337 {
8338 struct ieee80211com *ic = &sc->sc_ic;
8339 int err = 0, i;
8340
8341 err = iwx_run_init_mvm_ucode(sc, 0);
8342 if (err)
8343 return err;
8344
8345 if (!iwx_nic_lock(sc))
8346 return EBUSY;
8347
8348 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8349 if (err) {
8350 printf("%s: could not init tx ant config (error %d)\n",
8351 DEVNAME(sc), err);
8352 goto err;
8353 }
8354
8355 if (sc->sc_tx_with_siso_diversity) {
8356 err = iwx_send_phy_cfg_cmd(sc);
8357 if (err) {
8358 printf("%s: could not send phy config (error %d)\n",
8359 DEVNAME(sc), err);
8360 goto err;
8361 }
8362 }
8363
8364 err = iwx_send_bt_init_conf(sc);
8365 if (err) {
8366 printf("%s: could not init bt coex (error %d)\n",
8367 DEVNAME(sc), err);
8368 return err;
8369 }
8370
8371 err = iwx_send_soc_conf(sc);
8372 if (err) {
8373 printf("%s: iwx_send_soc_conf failed\n", __func__);
8374 return err;
8375 }
8376
8377 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8378 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8379 err = iwx_send_dqa_cmd(sc);
8380 if (err) {
8381 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8382 "failed (error %d)\n", __func__, err);
8383 return err;
8384 }
8385 }
8386 // TODO phyctxt
8387 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8388 /*
8389 * The channel used here isn't relevant as it's
8390 * going to be overwritten in the other flows.
8391 * For now use the first channel we have.
8392 */
8393 sc->sc_phyctxt[i].id = i;
8394 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8395 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8396 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8397 if (err) {
8398 printf("%s: could not add phy context %d (error %d)\n",
8399 DEVNAME(sc), i, err);
8400 goto err;
8401 }
8402 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8403 IWX_RLC_CONFIG_CMD) == 2) {
8404 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8405 if (err) {
8406 printf("%s: could not configure RLC for PHY "
8407 "%d (error %d)\n", DEVNAME(sc), i, err);
8408 goto err;
8409 }
8410 }
8411 }
8412
8413 err = iwx_config_ltr(sc);
8414 if (err) {
8415 printf("%s: PCIe LTR configuration failed (error %d)\n",
8416 DEVNAME(sc), err);
8417 }
8418
8419 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8420 err = iwx_send_temp_report_ths_cmd(sc);
8421 if (err) {
8422 printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8423 __func__);
8424 goto err;
8425 }
8426 }
8427
8428 err = iwx_power_update_device(sc);
8429 if (err) {
8430 printf("%s: could not send power command (error %d)\n",
8431 DEVNAME(sc), err);
8432 goto err;
8433 }
8434
8435 if (sc->sc_nvm.lar_enabled) {
8436 err = iwx_send_update_mcc_cmd(sc, "ZZ");
8437 if (err) {
8438 printf("%s: could not init LAR (error %d)\n",
8439 DEVNAME(sc), err);
8440 goto err;
8441 }
8442 }
8443
8444 err = iwx_config_umac_scan_reduced(sc);
8445 if (err) {
8446 printf("%s: could not configure scan (error %d)\n",
8447 DEVNAME(sc), err);
8448 goto err;
8449 }
8450
8451 err = iwx_disable_beacon_filter(sc);
8452 if (err) {
8453 printf("%s: could not disable beacon filter (error %d)\n",
8454 DEVNAME(sc), err);
8455 goto err;
8456 }
8457
8458 err:
8459 iwx_nic_unlock(sc);
8460 return err;
8461 }
8462
8463 /* Allow multicast from our BSSID. */
8464 static int
8465 iwx_allow_mcast(struct iwx_softc *sc)
8466 {
8467 struct ieee80211com *ic = &sc->sc_ic;
8468 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8469 struct iwx_node *in = IWX_NODE(vap->iv_bss);
8470 struct iwx_mcast_filter_cmd *cmd;
8471 size_t size;
8472 int err;
8473
8474 size = roundup(sizeof(*cmd), 4);
8475 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8476 if (cmd == NULL)
8477 return ENOMEM;
8478 cmd->filter_own = 1;
8479 cmd->port_id = 0;
8480 cmd->count = 0;
8481 cmd->pass_all = 1;
8482 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8483
8484 err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8485 0, size, cmd);
8486 free(cmd, M_DEVBUF);
8487 return err;
8488 }
8489
/*
 * Bring the interface up: start the hardware and run firmware
 * initialization.  The generation counter detects a concurrent
 * iwx_stop()/reset; the device is only stopped on error if no such
 * reset happened while we were initializing.
 */
static int
iwx_init(struct iwx_softc *sc)
{
	int err, generation;
	generation = ++sc->sc_generation;
	/* NOTE(review): iwx_preinit()'s result is ignored here — confirm
	 * whether failures should abort initialization. */
	iwx_preinit(sc);

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: iwx_start_hw failed\n", __func__);
		return err;
	}

	err = iwx_init_hw(sc);
	if (err) {
		if (generation == sc->sc_generation)
			iwx_stop_device(sc);
		printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
		return err;
	}

	sc->sc_flags |= IWX_FLAG_HW_INITED;
	/* Arm the per-queue Tx watchdog; it re-arms itself every second. */
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);

	return 0;
}
8516
8517 static void
8518 iwx_start(struct iwx_softc *sc)
8519 {
8520 struct ieee80211_node *ni;
8521 struct mbuf *m;
8522
8523 while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8524 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8525 if (iwx_tx(sc, m, ni) != 0) {
8526 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8527 continue;
8528 }
8529 }
8530 }
8531
8532 static void
8533 iwx_stop(struct iwx_softc *sc)
8534 {
8535 struct ieee80211com *ic = &sc->sc_ic;
8536 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8537 struct iwx_vap *ivp = IWX_VAP(vap);
8538
8539 iwx_stop_device(sc);
8540
8541 /* Reset soft state. */
8542 sc->sc_generation++;
8543 ivp->phy_ctxt = NULL;
8544
8545 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8546 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8547 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8548 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8549 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8550 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8551 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8552 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8553
8554 sc->sc_rx_ba_sessions = 0;
8555 sc->ba_rx.start_tidmask = 0;
8556 sc->ba_rx.stop_tidmask = 0;
8557 memset(sc->aggqid, 0, sizeof(sc->aggqid));
8558 sc->ba_tx.start_tidmask = 0;
8559 sc->ba_tx.stop_tidmask = 0;
8560 }
8561
8562 static void
8563 iwx_watchdog(void *arg)
8564 {
8565 struct iwx_softc *sc = arg;
8566 struct ieee80211com *ic = &sc->sc_ic;
8567 int i;
8568
8569 /*
8570 * We maintain a separate timer for each Tx queue because
8571 * Tx aggregation queues can get "stuck" while other queues
8572 * keep working. The Linux driver uses a similar workaround.
8573 */
8574 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8575 if (sc->sc_tx_timer[i] > 0) {
8576 if (--sc->sc_tx_timer[i] == 0) {
8577 printf("%s: device timeout\n", DEVNAME(sc));
8578
8579 iwx_nic_error(sc);
8580 iwx_dump_driver_status(sc);
8581 ieee80211_restart_all(ic);
8582 return;
8583 }
8584 }
8585 }
8586 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8587 }
8588
8589 /*
8590 * Note: This structure is read from the device with IO accesses,
8591 * and the reading already does the endian conversion. As it is
8592 * read with uint32_t-sized accesses, any members with a different size
8593 * need to be ordered correctly though!
8594 */
8595 struct iwx_error_event_table {
8596 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8597 uint32_t error_id; /* type of error */
8598 uint32_t trm_hw_status0; /* TRM HW status */
8599 uint32_t trm_hw_status1; /* TRM HW status */
8600 uint32_t blink2; /* branch link */
8601 uint32_t ilink1; /* interrupt link */
8602 uint32_t ilink2; /* interrupt link */
8603 uint32_t data1; /* error-specific data */
8604 uint32_t data2; /* error-specific data */
8605 uint32_t data3; /* error-specific data */
8606 uint32_t bcon_time; /* beacon timer */
8607 uint32_t tsf_low; /* network timestamp function timer */
8608 uint32_t tsf_hi; /* network timestamp function timer */
8609 uint32_t gp1; /* GP1 timer register */
8610 uint32_t gp2; /* GP2 timer register */
8611 uint32_t fw_rev_type; /* firmware revision type */
8612 uint32_t major; /* uCode version major */
8613 uint32_t minor; /* uCode version minor */
8614 uint32_t hw_ver; /* HW Silicon version */
8615 uint32_t brd_ver; /* HW board version */
8616 uint32_t log_pc; /* log program counter */
8617 uint32_t frame_ptr; /* frame pointer */
8618 uint32_t stack_ptr; /* stack pointer */
8619 uint32_t hcmd; /* last host command header */
8620 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
8621 * rxtx_flag */
8622 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
8623 * host_flag */
8624 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
8625 * enc_flag */
8626 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
8627 * time_flag */
8628 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
8629 * wico interrupt */
8630 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
8631 uint32_t wait_event; /* wait event() caller address */
8632 uint32_t l2p_control; /* L2pControlField */
8633 uint32_t l2p_duration; /* L2pDurationField */
8634 uint32_t l2p_mhvalid; /* L2pMhValidBits */
8635 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
8636 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
8637 * (LMPM_PMG_SEL) */
8638 uint32_t u_timestamp; /* indicate when the date and time of the
8639 * compilation */
8640 uint32_t flow_handler; /* FH read/write pointers, RX credit */
8641 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8642
8643 /*
8644 * UMAC error struct - relevant starting from family 8000 chip.
8645 * Note: This structure is read from the device with IO accesses,
8646 * and the reading already does the endian conversion. As it is
8647 * read with u32-sized accesses, any members with a different size
8648 * need to be ordered correctly though!
8649 */
8650 struct iwx_umac_error_event_table {
8651 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8652 uint32_t error_id; /* type of error */
8653 uint32_t blink1; /* branch link */
8654 uint32_t blink2; /* branch link */
8655 uint32_t ilink1; /* interrupt link */
8656 uint32_t ilink2; /* interrupt link */
8657 uint32_t data1; /* error-specific data */
8658 uint32_t data2; /* error-specific data */
8659 uint32_t data3; /* error-specific data */
8660 uint32_t umac_major;
8661 uint32_t umac_minor;
8662 uint32_t frame_pointer; /* core register 27*/
8663 uint32_t stack_pointer; /* core register 28 */
8664 uint32_t cmd_header; /* latest host cmd sent to UMAC */
8665 uint32_t nic_isr_pref; /* ISR status register */
8666 } __packed;
8667
8668 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
8669 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
8670
/*
 * Dump the UMAC error log from device memory.  The table pointer was
 * captured from the firmware alive response; values below 0x400000
 * are treated as invalid.
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() takes a count of 32-bit words. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8715
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Known firmware assert/error identifiers, matched after the CPU bits
 * (IWX_FW_SYSASSERT_CPU_MASK) are masked off.  The final
 * "ADVANCED_SYSASSERT" entry is the catch-all that iwx_desc_lookup()
 * returns when no id matches.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },	/* same name as 0x38 */
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },	/* catch-all; must stay last */
};
8742
8743 static const char *
8744 iwx_desc_lookup(uint32_t num)
8745 {
8746 int i;
8747
8748 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8749 if (advanced_lookup[i].num ==
8750 (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8751 return advanced_lookup[i].name;
8752
8753 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8754 return advanced_lookup[i].name;
8755 }
8756
8757 /*
8758 * Support for dumping the error log seemed like a good idea ...
8759 * but it's mostly hex junk and the only sensible thing is the
8760 * hw/ucode revision (which we know anyway). Since it's here,
8761 * I'll just leave it in, just in case e.g. the Intel guys want to
8762 * help us decipher some "ADVANCED_SYSASSERT" later.
8763 */
8764 static void
8765 iwx_nic_error(struct iwx_softc *sc)
8766 {
8767 struct iwx_error_event_table table;
8768 uint32_t base;
8769
8770 printf("%s: dumping device error log\n", DEVNAME(sc));
8771 printf("%s: GOS-3758: 1\n", __func__);
8772 base = sc->sc_uc.uc_lmac_error_event_table[0];
8773 printf("%s: GOS-3758: 2\n", __func__);
8774 if (base < 0x400000) {
8775 printf("%s: Invalid error log pointer 0x%08x\n",
8776 DEVNAME(sc), base);
8777 return;
8778 }
8779
8780 printf("%s: GOS-3758: 3\n", __func__);
8781 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8782 printf("%s: reading errlog failed\n", DEVNAME(sc));
8783 return;
8784 }
8785
8786 printf("%s: GOS-3758: 4\n", __func__);
8787 if (!table.valid) {
8788 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8789 return;
8790 }
8791
8792 printf("%s: GOS-3758: 5\n", __func__);
8793 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8794 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8795 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8796 sc->sc_flags, table.valid);
8797 }
8798
8799 printf("%s: GOS-3758: 6\n", __func__);
8800 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8801 iwx_desc_lookup(table.error_id));
8802 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8803 table.trm_hw_status0);
8804 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8805 table.trm_hw_status1);
8806 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8807 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8808 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8809 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8810 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8811 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8812 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8813 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8814 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8815 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8816 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8817 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8818 table.fw_rev_type);
8819 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8820 table.major);
8821 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8822 table.minor);
8823 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8824 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8825 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8826 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8827 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8828 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8829 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8830 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8831 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8832 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8833 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8834 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8835 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8836 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8837 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8838 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8839 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8840
8841 if (sc->sc_uc.uc_umac_error_event_table)
8842 iwx_nic_umac_error(sc);
8843 }
8844
8845 static void
8846 iwx_dump_driver_status(struct iwx_softc *sc)
8847 {
8848 struct ieee80211com *ic = &sc->sc_ic;
8849 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8850 enum ieee80211_state state = vap->iv_state;
8851 int i;
8852
8853 printf("driver status:\n");
8854 for (i = 0; i < nitems(sc->txq); i++) {
8855 struct iwx_tx_ring *ring = &sc->txq[i];
8856 printf(" tx ring %2d: qid=%-2d cur=%-3d "
8857 "cur_hw=%-3d queued=%-3d\n",
8858 i, ring->qid, ring->cur, ring->cur_hw,
8859 ring->queued);
8860 }
8861 printf(" rx ring: cur=%d\n", sc->rxq.cur);
8862 printf(" 802.11 state %s\n", ieee80211_state_name[state]);
8863 }
8864
/*
 * Sync the RX buffer DMA map for CPU reads and point _var_ at the
 * response payload, which starts immediately after the packet header.
 * Relies on 'sc' and 'data' being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_) \
do { \
    bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
    _var_ = (void *)((_pkt_)+1); \
} while (/*CONSTCOND*/0)
8870
8871 static int
8872 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8873 {
8874 int qid, idx, code;
8875
8876 qid = pkt->hdr.qid & ~0x80;
8877 idx = pkt->hdr.idx;
8878 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8879
8880 return (!(qid == 0 && idx == 0 && code == 0) &&
8881 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8882 }
8883
/*
 * Process all firmware packets contained in a single RX buffer.
 *
 * A buffer of IWX_RBUF_SIZE bytes may hold several concatenated
 * packets (pre-AX210 devices only; AX210 and later ship exactly one
 * packet per buffer). Each packet is either an Rx MPDU (a received
 * 802.11 frame handed to net80211) or a firmware notification /
 * command response that is dispatched by the switch below.
 *
 * Ownership: data->m is the ring's mbuf. When the first MPDU is seen
 * the mbuf is taken off the ring (iwx_rx_addbuf replaces it) and
 * either handed to the stack directly (last frame in buffer) or
 * copied per-frame with m_copym(). The trailing check frees m0 only
 * if it was taken off the ring and not consumed.
 *
 * NOTE(review): the 'ml' parameter is not used in this body — the
 * packet data comes from data->m. Presumably a leftover from the
 * OpenBSD mbuf-list API; confirm against the caller.
 */
static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct iwx_rx_packet *pkt, *nextpkt;
    uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
    struct mbuf *m0, *m;
    /* Smallest possible packet: length/flags word plus header. */
    const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
    int qid, idx, code, handled = 1;

    m0 = data->m;
    /* Walk every packet that fits in the buffer. */
    while (m0 && offset + minsz < IWX_RBUF_SIZE) {
        pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
        /* qid keeps the raw value incl. the 0x80 "notification" bit. */
        qid = pkt->hdr.qid;
        idx = pkt->hdr.idx;
        code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

        if (!iwx_rx_pkt_valid(pkt))
            break;

        /*
         * XXX Intel inside (tm)
         * Any commands in the LONG_GROUP could actually be in the
         * LEGACY group. Firmware API versions >= 50 reject commands
         * in group 0, forcing us to use this hack.
         */
        if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
            struct iwx_tx_ring *ring = &sc->txq[qid];
            struct iwx_tx_data *txdata = &ring->data[idx];
            if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
                code = iwx_cmd_opcode(code);
        }

        len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
        if (len < minsz || len > (IWX_RBUF_SIZE - offset))
            break;

        /*
         * On the first Rx MPDU in this buffer, replace the mbuf on
         * the RX ring so the stack can take ownership of m0.
         */
        if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
            /* Take mbuf m0 off the RX ring. */
            if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
                break;
            }
            KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
        }

        switch (code) {
        case IWX_REPLY_RX_PHY_CMD:
            /* XXX-THJ: I've not managed to hit this path in testing */
            iwx_rx_rx_phy_cmd(sc, pkt, data);
            break;

        case IWX_REPLY_RX_MPDU_CMD: {
            size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
            nextoff = offset +
                roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
            nextpkt = (struct iwx_rx_packet *)
                (m0->m_data + nextoff);
            /* AX210 devices ship only one packet per Rx buffer. */
            if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
                nextoff + minsz >= IWX_RBUF_SIZE ||
                !iwx_rx_pkt_valid(nextpkt)) {
                /* No need to copy last frame in buffer. */
                if (offset > 0)
                    m_adj(m0, offset);
                iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
                m0 = NULL; /* stack owns m0 now; abort loop */
            } else {
                /*
                 * Create an mbuf which points to the current
                 * packet. Always copy from offset zero to
                 * preserve m_pkthdr.
                 */
                m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
                if (m == NULL) {
                    m_freem(m0);
                    m0 = NULL;
                    break;
                }
                m_adj(m, offset);
                iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
            }
            break;
        }

        // case IWX_BAR_FRAME_RELEASE:
        // 	iwx_rx_bar_frame_release(sc, pkt, ml);
        // 	break;
        //
        case IWX_TX_CMD:
            iwx_rx_tx_cmd(sc, pkt, data);
            break;

        case IWX_BA_NOTIF:
            iwx_rx_compressed_ba(sc, pkt);
            break;

        case IWX_MISSED_BEACONS_NOTIFICATION:
            iwx_rx_bmiss(sc, pkt, data);
            DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
                __func__));
            ieee80211_beacon_miss(ic);
            break;

        case IWX_MFUART_LOAD_NOTIFICATION:
            break;

        case IWX_ALIVE: {
            struct iwx_alive_resp_v4 *resp4;
            struct iwx_alive_resp_v5 *resp5;
            struct iwx_alive_resp_v6 *resp6;

            DPRINTF(("%s: firmware alive\n", __func__));
            sc->sc_uc.uc_ok = 0;

            /*
             * For v5 and above, we can check the version, for older
             * versions we need to check the size.
             */
            if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
                IWX_ALIVE) == 6) {
                SYNC_RESP_STRUCT(resp6, pkt);
                /* Size mismatch: wake the waiter without uc_ok set. */
                if (iwx_rx_packet_payload_len(pkt) !=
                    sizeof(*resp6)) {
                    sc->sc_uc.uc_intr = 1;
                    wakeup(&sc->sc_uc);
                    break;
                }
                sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
                    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
                sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
                    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
                sc->sc_uc.uc_log_event_table = le32toh(
                    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
                sc->sc_uc.uc_umac_error_event_table = le32toh(
                    resp6->umac_data.dbg_ptrs.error_info_addr);
                sc->sc_sku_id[0] =
                    le32toh(resp6->sku_id.data[0]);
                sc->sc_sku_id[1] =
                    le32toh(resp6->sku_id.data[1]);
                sc->sc_sku_id[2] =
                    le32toh(resp6->sku_id.data[2]);
                if (resp6->status == IWX_ALIVE_STATUS_OK) {
                    sc->sc_uc.uc_ok = 1;
                }
            } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
                IWX_ALIVE) == 5) {
                SYNC_RESP_STRUCT(resp5, pkt);
                if (iwx_rx_packet_payload_len(pkt) !=
                    sizeof(*resp5)) {
                    sc->sc_uc.uc_intr = 1;
                    wakeup(&sc->sc_uc);
                    break;
                }
                sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
                    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
                sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
                    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
                sc->sc_uc.uc_log_event_table = le32toh(
                    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
                sc->sc_uc.uc_umac_error_event_table = le32toh(
                    resp5->umac_data.dbg_ptrs.error_info_addr);
                sc->sc_sku_id[0] =
                    le32toh(resp5->sku_id.data[0]);
                sc->sc_sku_id[1] =
                    le32toh(resp5->sku_id.data[1]);
                sc->sc_sku_id[2] =
                    le32toh(resp5->sku_id.data[2]);
                if (resp5->status == IWX_ALIVE_STATUS_OK)
                    sc->sc_uc.uc_ok = 1;
            } else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
                SYNC_RESP_STRUCT(resp4, pkt);
                sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
                    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
                sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
                    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
                sc->sc_uc.uc_log_event_table = le32toh(
                    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
                sc->sc_uc.uc_umac_error_event_table = le32toh(
                    resp4->umac_data.dbg_ptrs.error_info_addr);
                if (resp4->status == IWX_ALIVE_STATUS_OK)
                    sc->sc_uc.uc_ok = 1;
            } else
                printf("unknown payload version");

            /* Wake whoever is waiting for the ALIVE notification. */
            sc->sc_uc.uc_intr = 1;
            wakeup(&sc->sc_uc);
            break;
        }

        case IWX_STATISTICS_NOTIFICATION: {
            struct iwx_notif_statistics *stats;
            SYNC_RESP_STRUCT(stats, pkt);
            memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
            sc->sc_noise = iwx_get_noise(&stats->rx.general);
            break;
        }

        case IWX_DTS_MEASUREMENT_NOTIFICATION:
        case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
            IWX_DTS_MEASUREMENT_NOTIF_WIDE):
        case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
            IWX_TEMP_REPORTING_THRESHOLDS_CMD):
            break;

        case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
            IWX_CT_KILL_NOTIFICATION): {
            struct iwx_ct_kill_notif *notif;
            SYNC_RESP_STRUCT(notif, pkt);
            printf("%s: device at critical temperature (%u degC), "
                "stopping device\n",
                DEVNAME(sc), le16toh(notif->temperature));
            sc->sc_flags |= IWX_FLAG_HW_ERR;
            ieee80211_restart_all(ic);
            break;
        }

        /*
         * Synchronous command responses: if a caller is waiting for
         * the response payload (sc_cmd_resp_pkt[idx] != NULL), copy
         * the packet out for it; the wakeup happens in iwx_cmd_done().
         */
        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
            IWX_SCD_QUEUE_CONFIG_CMD):
        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
            IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
        case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
            IWX_SESSION_PROTECTION_CMD):
        case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
            IWX_NVM_GET_INFO):
        case IWX_ADD_STA_KEY:
        case IWX_PHY_CONFIGURATION_CMD:
        case IWX_TX_ANT_CONFIGURATION_CMD:
        case IWX_ADD_STA:
        case IWX_MAC_CONTEXT_CMD:
        case IWX_REPLY_SF_CFG_CMD:
        case IWX_POWER_TABLE_CMD:
        case IWX_LTR_CONFIG:
        case IWX_PHY_CONTEXT_CMD:
        case IWX_BINDING_CONTEXT_CMD:
        case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
        case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
        case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
        case IWX_REPLY_BEACON_FILTERING_CMD:
        case IWX_MAC_PM_POWER_TABLE:
        case IWX_TIME_QUOTA_CMD:
        case IWX_REMOVE_STA:
        case IWX_TXPATH_FLUSH:
        case IWX_BT_CONFIG:
        case IWX_MCC_UPDATE_CMD:
        case IWX_TIME_EVENT_CMD:
        case IWX_STATISTICS_CMD:
        case IWX_SCD_QUEUE_CFG: {
            size_t pkt_len;

            if (sc->sc_cmd_resp_pkt[idx] == NULL)
                break;

            bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                BUS_DMASYNC_POSTREAD);

            pkt_len = sizeof(pkt->len_n_flags) +
                iwx_rx_packet_len(pkt);

            /* Drop the response buffer on failure or bad length. */
            if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
                pkt_len < sizeof(*pkt) ||
                pkt_len > sc->sc_cmd_resp_len[idx]) {
                free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
                sc->sc_cmd_resp_pkt[idx] = NULL;
                break;
            }

            bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                BUS_DMASYNC_POSTREAD);
            memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
            break;
        }

        case IWX_INIT_COMPLETE_NOTIF:
            sc->sc_init_complete |= IWX_INIT_COMPLETE;
            wakeup(&sc->sc_init_complete);
            break;

        case IWX_SCAN_COMPLETE_UMAC: {
            DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
            /* notif is only read by DPRINTF; unused in !DEBUG builds. */
            struct iwx_umac_scan_complete *notif __attribute__((unused));
            SYNC_RESP_STRUCT(notif, pkt);
            DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
                notif->status));
            ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
            iwx_endscan(sc);
            break;
        }

        case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
            DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
                __func__));
            struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
            SYNC_RESP_STRUCT(notif, pkt);
            DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
                notif->status));
            iwx_endscan(sc);
            break;
        }

        case IWX_MCC_CHUB_UPDATE_CMD: {
            struct iwx_mcc_chub_notif *notif;
            SYNC_RESP_STRUCT(notif, pkt);
            iwx_mcc_update(sc, notif);
            break;
        }

        case IWX_REPLY_ERROR: {
            struct iwx_error_resp *resp;
            SYNC_RESP_STRUCT(resp, pkt);
            printf("%s: firmware error 0x%x, cmd 0x%x\n",
                DEVNAME(sc), le32toh(resp->error_type),
                resp->cmd_id);
            break;
        }

        case IWX_TIME_EVENT_NOTIFICATION: {
            struct iwx_time_event_notif *notif;
            uint32_t action;
            SYNC_RESP_STRUCT(notif, pkt);

            /* Ignore notifications for time events we did not start. */
            if (sc->sc_time_event_uid != le32toh(notif->unique_id))
                break;
            action = le32toh(notif->action);
            if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
                sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
            break;
        }

        case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
            IWX_SESSION_PROTECTION_NOTIF): {
            struct iwx_session_prot_notif *notif;
            uint32_t status, start, conf_id;

            SYNC_RESP_STRUCT(notif, pkt);

            status = le32toh(notif->status);
            start = le32toh(notif->start);
            conf_id = le32toh(notif->conf_id);
            /* Check for end of successful PROTECT_CONF_ASSOC. */
            if (status == 1 && start == 0 &&
                conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
                sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
            break;
        }

        case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
            IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
            break;

        /*
         * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
         * messages. Just ignore them for now.
         */
        case IWX_DEBUG_LOG_MSG:
            break;

        case IWX_MCAST_FILTER_CMD:
            break;

        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
            break;

        case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
            break;

        case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
            break;

        case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
            IWX_NVM_ACCESS_COMPLETE):
            break;

        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
            break; /* happens in monitor mode; ignore for now */

        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
            break;

        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
            IWX_TLC_MNG_UPDATE_NOTIF): {
            struct iwx_tlc_update_notif *notif;
            SYNC_RESP_STRUCT(notif, pkt);
            (void)notif;
            /* Only act on notifications of the expected size. */
            if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
                iwx_rs_update(sc, notif);
            break;
        }

        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
            break;

        /* undocumented notification from iwx-ty-a0-gf-a0-77 image */
        case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
            break;

        case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
            IWX_PNVM_INIT_COMPLETE):
            DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
            sc->sc_init_complete |= IWX_PNVM_COMPLETE;
            wakeup(&sc->sc_init_complete);
            break;

        default:
            handled = 0;
            /* XXX wulf: Get rid of bluetooth-related spam */
            if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
                (code == 0xce && pkt->len_n_flags == 0x2000002c))
                break;
            printf("%s: unhandled firmware response 0x%x/0x%x "
                "rx ring %d[%d]\n",
                DEVNAME(sc), code, pkt->len_n_flags,
                (qid & ~0x80), idx);
            break;
        }

        /*
         * uCode sets bit 0x80 when it originates the notification,
         * i.e. when the notification is not a direct response to a
         * command sent by the driver.
         * For example, uCode issues IWX_REPLY_RX when it sends a
         * received frame to the driver.
         */
        if (handled && !(qid & (1 << 7))) {
            iwx_cmd_done(sc, qid, idx, code);
        }

        offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

        /* AX210 devices ship only one packet per Rx buffer. */
        if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
            break;
    }

    /* Free m0 if we took it off the ring but never gave it away. */
    if (m0 && m0 != data->m)
        m_freem(m0);
}
9321
/*
 * Drain the RX ring: read the hardware's closed-buffer index from the
 * status area and process every buffer between our software cursor
 * (sc->rxq.cur) and that index, then tell the firmware how far we got.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
    /*
     * NOTE(review): 'm' is passed to iwx_rx_pkt() uninitialized, but
     * that function's 'ml' parameter is unused there — presumably a
     * vestige of the OpenBSD API; confirm before relying on it.
     */
    struct mbuf m;
    uint16_t hw;

    bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
        BUS_DMASYNC_POSTREAD);

    /* The closed-RB index lives in a different spot on AX210+. */
    if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
        uint16_t *status = sc->rxq.stat_dma.vaddr;
        hw = le16toh(*status) & 0xfff;
    } else
        hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
    /* Wrap the hardware index into our ring size. */
    hw &= (IWX_RX_MQ_RING_COUNT - 1);
    while (sc->rxq.cur != hw) {
        struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

        bus_dmamap_sync(sc->rxq.data_dmat, data->map,
            BUS_DMASYNC_POSTREAD);

        iwx_rx_pkt(sc, data, &m);
        sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
    }

    /*
     * Tell the firmware what we have processed.
     * Seems like the hardware gets upset unless we align the write by 8??
     */
    hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
    IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9354
#if 0
/*
 * Legacy (INTx/ICT) interrupt handler, currently compiled out — the
 * driver registers iwx_intr_msix() instead. Kept for reference in
 * case legacy interrupt support is ever needed. Returns non-zero if
 * the interrupt was ours.
 */
int
iwx_intr(void *arg)
{
    struct iwx_softc *sc = arg;
    struct ieee80211com *ic = &sc->sc_ic;
    struct ifnet *ifp = IC2IFP(ic);
    int r1, r2, rv = 0;

    /* Mask all interrupts while we figure out the cause. */
    IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

    if (sc->sc_flags & IWX_FLAG_USE_ICT) {
        uint32_t *ict = sc->ict_dma.vaddr;
        int tmp;

        tmp = htole32(ict[sc->ict_cur]);
        if (!tmp)
            goto out_ena;

        /*
         * ok, there was something. keep plowing until we have all.
         */
        r1 = r2 = 0;
        while (tmp) {
            r1 |= tmp;
            ict[sc->ict_cur] = 0;
            sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
            tmp = htole32(ict[sc->ict_cur]);
        }

        /* this is where the fun begins. don't ask */
        if (r1 == 0xffffffff)
            r1 = 0;

        /* i am not expected to understand this */
        if (r1 & 0xc0000)
            r1 |= 0x8000;
        r1 = (0xff & r1) | ((0xff00 & r1) << 16);
    } else {
        r1 = IWX_READ(sc, IWX_CSR_INT);
        /* All-ones / 0xa5a5a5ax patterns mean the device is gone. */
        if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
            goto out;
        r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
    }
    if (r1 == 0 && r2 == 0) {
        goto out_ena;
    }

    /* Acknowledge the interrupts we are about to service. */
    IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

    if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
        int i;
        /* Firmware has now configured the RFH. */
        for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
            iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
        IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
    }


    if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
        iwx_check_rfkill(sc);
        rv = 1;
        goto out_ena;
    }

    if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
        if (ifp->if_flags & IFF_DEBUG) {
            iwx_nic_error(sc);
            iwx_dump_driver_status(sc);
        }
        printf("%s: fatal firmware error\n", DEVNAME(sc));
        ieee80211_restart_all(ic);
        rv = 1;
        goto out;

    }

    if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
        printf("%s: hardware error, stopping device \n", DEVNAME(sc));
        iwx_stop(sc);
        rv = 1;
        goto out;
    }

    /* firmware chunk loaded */
    if (r1 & IWX_CSR_INT_BIT_FH_TX) {
        IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

        sc->sc_fw_chunk_done = 1;
        wakeup(&sc->sc_fw);
    }

    if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
        IWX_CSR_INT_BIT_RX_PERIODIC)) {
        if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
            IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
        }
        if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
            IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
        }

        /* Disable periodic interrupt; we use it as just a one-shot. */
        IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

        /*
         * Enable periodic interrupt in 8 msec only if we received
         * real RX interrupt (instead of just periodic int), to catch
         * any dangling Rx interrupt. If it was just the periodic
         * interrupt, there was no dangling Rx activity, and no need
         * to extend the periodic interrupt; one-shot is enough.
         */
        if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
            IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
                IWX_CSR_INT_PERIODIC_ENA);

        iwx_notif_intr(sc);
    }

    rv = 1;

 out_ena:
    iwx_restore_interrupts(sc);
 out:
    return rv;
}
#endif
9483
/*
 * MSI-X interrupt handler (vector 0). Reads and acknowledges the FH
 * and HW interrupt cause registers, then dispatches: RX notifications,
 * firmware-chunk-load completion, fatal firmware/hardware errors,
 * rfkill changes, and the firmware ALIVE event.
 */
static void
iwx_intr_msix(void *arg)
{
    struct iwx_softc *sc = arg;
    struct ieee80211com *ic = &sc->sc_ic;
    uint32_t inta_fh, inta_hw;
    int vector = 0;

    IWX_LOCK(sc);

    /* Read causes, ack them by writing back, then mask to enabled bits. */
    inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
    inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
    IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
    IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
    inta_fh &= sc->sc_fh_mask;
    inta_hw &= sc->sc_hw_mask;

    if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
        inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
        iwx_notif_intr(sc);
    }

    /* firmware chunk loaded */
    if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
        sc->sc_fw_chunk_done = 1;
        wakeup(&sc->sc_fw);
    }

    /* Fatal firmware error: dump state (if debugging) and restart. */
    if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
        (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
        (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
        if (sc->sc_debug) {
            iwx_nic_error(sc);
            iwx_dump_driver_status(sc);
        }
        printf("%s: fatal firmware error\n", DEVNAME(sc));
        ieee80211_restart_all(ic);
        goto out;
    }

    if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
        iwx_check_rfkill(sc);
    }

    if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
        printf("%s: hardware error, stopping device \n", DEVNAME(sc));
        sc->sc_flags |= IWX_FLAG_HW_ERR;
        iwx_stop(sc);
        goto out;
    }

    if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
        IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
            "%s:%d WARNING: Skipping rx desc update\n",
            __func__, __LINE__);
#if 0
        /*
         * XXX-THJ: we don't have the dma segment handy. This is hacked
         * out in the fc release, return to it if we ever get this
         * warning.
         */
        /* Firmware has now configured the RFH. */
        for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
            iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
        IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
    }

    /*
     * Before sending the interrupt the HW disables it to prevent
     * a nested interrupt. This is done by writing 1 to the corresponding
     * bit in the mask register. After handling the interrupt, it should be
     * re-enabled by clearing this bit. This register is defined as
     * write 1 clear (W1C) register, meaning that it's being clear
     * by writing 1 to the bit.
     */
    IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
 out:
    IWX_UNLOCK(sc);
    return;
}
9565
9566 /*
9567 * The device info table below contains device-specific config overrides.
9568 * The most important parameter derived from this table is the name of the
9569 * firmware image to load.
9570 *
9571 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9572 * The "old" table matches devices based on PCI vendor/product IDs only.
9573 * The "new" table extends this with various device parameters derived
9574 * from MAC type, and RF type.
9575 *
9576 * In iwlwifi "old" and "new" tables share the same array, where "old"
9577 * entries contain dummy values for data defined only for "new" entries.
9578 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9581 * entries from "old" to "new" have at times been reverted due to regressions.
9582 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9583 * devices in the same driver.
9584 *
9585 * Our table below contains mostly "new" entries declared in iwlwifi
9586 * with the _IWL_DEV_INFO() macro (with a leading underscore).
9587 * Other devices are matched based on PCI vendor/product ID as usual,
9588 * unless matching specific PCI subsystem vendor/product IDs is required.
9589 *
9590 * Some "old"-style entries are required to identify the firmware image to use.
9591 * Others might be used to print a specific marketing name into Linux dmesg,
9592 * but we can't be sure whether the corresponding devices would be matched
9593 * correctly in the absence of their entries. So we include them just in case.
9594 */
9595
/*
 * One entry in the device info table. Fields set to IWX_CFG_ANY act
 * as wildcards when matching a device against the table.
 */
struct iwx_dev_info {
    uint16_t device;	/* PCI device ID (or IWX_CFG_ANY) */
    uint16_t subdevice;	/* PCI subsystem device ID (or IWX_CFG_ANY) */
    uint16_t mac_type;	/* MAC type to match (IWX_CFG_MAC_TYPE_*) */
    uint16_t rf_type;	/* RF type to match (IWX_CFG_RF_TYPE_*) */
    uint8_t mac_step;	/* MAC silicon stepping (IWX_SILICON_*_STEP) */
    uint8_t rf_id;	/* RF ID to match (IWX_CFG_RF_ID_*) */
    uint8_t no_160;	/* IWX_CFG_NO_160 if 160 MHz is unsupported */
    uint8_t cores;	/* core config to match (e.g. IWX_CFG_CORES_BT) */
    uint8_t cdb;	/* CDB flag to match (IWX_CFG_NO_CDB/...) */
    uint8_t jacket;	/* jacket flag to match */
    const struct iwx_device_cfg *cfg;	/* config selected on match */
};
9609
/*
 * Build a fully-specified iwx_dev_info entry, mirroring iwlwifi's
 * _IWL_DEV_INFO() "new"-style table macro. Any field may be passed as
 * IWX_CFG_ANY to wildcard it during matching.
 */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
    _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/*
 * "Old"-style entry: match on PCI device/subdevice IDs only, with all
 * other fields wildcarded.
 */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
	    IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
	    IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9621
9622 /*
9623 * When adding entries to this table keep in mind that entries must
9624 * be listed in the same order as in the Linux driver. Code walks this
9625 * table backwards and uses the first matching entry it finds.
9626 * Device firmware must be available in fw_update(8).
9627 */
9628 static const struct iwx_dev_info iwx_dev_info_table[] = {
9629 /* So with HR */
9630 IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
9631 IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
9632 IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
9633 IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
9634 IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
9635 IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
9636 IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
9637 IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
9638 IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
9639 IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
9640 IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
9641 IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
9642 IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
9643 IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
9644 IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9645 IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9646 IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
9647 IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
9648 IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9649 IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9650 IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
9651 IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
9652 IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
9653 IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
9654 IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
9655 IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
9656 IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
9657 IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
9658 IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
9659 IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9660 IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9661 IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
9662 IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
9663 IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
9664 IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9665 IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9666
9667 /* So with GF2 */
9668 IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9669 IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9670 IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9671 IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9672 IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9673 IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9674 IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9675 IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9676 IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9677 IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9678 IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9679 IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9680
9681 /* Qu with Jf, C step */
9682 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9683 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9684 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9685 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9686 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
9687 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9688 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9689 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9690 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9691 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
9692 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9693 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9694 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9695 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9696 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
9697 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9698 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9699 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9700 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9701 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
9702 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9703 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9704 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9705 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9706 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
9707 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9708 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9709 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9710 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9711 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
9712 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9713 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9714 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9715 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9716 IWX_CFG_ANY,
9717 iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
9718 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9719 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9720 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9721 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9722 IWX_CFG_ANY,
9723 iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
9724
9725 /* QuZ with Jf */
9726 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9727 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9728 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9729 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9730 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
9731 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9732 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9733 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9734 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9735 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
9736 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9737 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9738 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9739 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9740 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
9741 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9742 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9743 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9744 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9745 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
9746 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9747 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9748 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9749 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9750 IWX_CFG_ANY,
9751 iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
9752 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9753 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9754 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9755 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9756 IWX_CFG_ANY,
9757 iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
9758
9759 /* Qu with Hr, B step */
9760 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9761 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9762 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9763 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9764 iwx_qu_b0_hr1_b0), /* AX101 */
9765 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9766 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9767 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9768 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9769 iwx_qu_b0_hr_b0), /* AX203 */
9770
9771 /* Qu with Hr, C step */
9772 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9773 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9774 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9775 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9776 iwx_qu_c0_hr1_b0), /* AX101 */
9777 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9778 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9779 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9780 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9781 iwx_qu_c0_hr_b0), /* AX203 */
9782 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9783 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9784 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9785 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9786 iwx_qu_c0_hr_b0), /* AX201 */
9787
9788 /* QuZ with Hr */
9789 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9790 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9791 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9792 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9793 iwx_quz_a0_hr1_b0), /* AX101 */
9794 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9795 IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
9796 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9797 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9798 iwx_cfg_quz_a0_hr_b0), /* AX203 */
9799
9800 /* SoF with JF2 */
9801 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9802 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9803 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9804 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9805 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
9806 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9807 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9808 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9809 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9810 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
9811
9812 /* SoF with JF */
9813 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9814 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9815 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9816 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9817 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
9818 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9819 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9820 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9821 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9822 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
9823 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9824 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9825 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9826 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9827 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
9828 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9829 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9830 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9831 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9832 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
9833
9834 /* So with Hr */
9835 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9836 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9837 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9838 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9839 iwx_cfg_so_a0_hr_b0), /* AX203 */
9840 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9841 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9842 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9843 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9844 iwx_cfg_so_a0_hr_b0), /* ax101 */
9845 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9846 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9847 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9848 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9849 iwx_cfg_so_a0_hr_b0), /* ax201 */
9850
9851 /* So-F with Hr */
9852 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9853 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9854 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9855 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9856 iwx_cfg_so_a0_hr_b0), /* AX203 */
9857 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9858 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9859 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9860 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9861 iwx_cfg_so_a0_hr_b0), /* AX101 */
9862 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9863 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9864 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9865 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9866 iwx_cfg_so_a0_hr_b0), /* AX201 */
9867
9868 /* So-F with GF */
9869 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9870 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9871 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9872 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9873 iwx_2ax_cfg_so_gf_a0), /* AX211 */
9874 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9875 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9876 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9877 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
9878 iwx_2ax_cfg_so_gf4_a0), /* AX411 */
9879
9880 /* So with GF */
9881 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9882 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9883 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9884 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9885 iwx_2ax_cfg_so_gf_a0), /* AX211 */
9886 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9887 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9888 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9889 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
9890 iwx_2ax_cfg_so_gf4_a0), /* AX411 */
9891
9892 /* So with JF2 */
9893 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9894 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9895 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9896 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9897 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
9898 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9899 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9900 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9901 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9902 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
9903
9904 /* So with JF */
9905 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9906 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9907 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9908 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9909 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
9910 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9911 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9912 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9913 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9914 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
9915 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9916 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9917 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9918 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9919 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
9920 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9921 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9922 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9923 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9924 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
9925 };
9926
9927 static int
9928 iwx_preinit(struct iwx_softc *sc)
9929 {
9930 struct ieee80211com *ic = &sc->sc_ic;
9931 int err;
9932
9933 err = iwx_prepare_card_hw(sc);
9934 if (err) {
9935 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9936 return err;
9937 }
9938
9939 if (sc->attached) {
9940 return 0;
9941 }
9942
9943 err = iwx_start_hw(sc);
9944 if (err) {
9945 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9946 return err;
9947 }
9948
9949 err = iwx_run_init_mvm_ucode(sc, 1);
9950 iwx_stop_device(sc);
9951 if (err) {
9952 printf("%s: failed to stop device\n", DEVNAME(sc));
9953 return err;
9954 }
9955
9956 /* Print version info and MAC address on first successful fw load. */
9957 sc->attached = 1;
9958 if (sc->sc_pnvm_ver) {
9959 printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
9960 "address %s\n",
9961 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9962 sc->sc_fwver, sc->sc_pnvm_ver,
9963 ether_sprintf(sc->sc_nvm.hw_addr));
9964 } else {
9965 printf("%s: hw rev 0x%x, fw %s, address %s\n",
9966 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9967 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9968 }
9969
9970 /* not all hardware can do 5GHz band */
9971 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9972 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9973 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9974
9975 return 0;
9976 }
9977
/*
 * Deferred attach hook, run via config_intrhook once the system is far
 * enough along for firmware(9) images to be loadable.  Performs the
 * firmware preinit and then registers the device with net80211.
 */
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/* Load and boot init firmware; bail out of attach on failure. */
	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/* Register with net80211 and install driver callbacks. */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/*
	 * Interpose the A-MPDU RX handlers; the saved net80211 defaults
	 * are chained to from the driver's wrappers, so save-then-override
	 * order matters here.
	 */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	/* Likewise for the ADDBA (TX aggregation) handlers. */
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	/* One-shot hook: always disestablish, success or failure. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10026
10027 const struct iwx_device_cfg *
10028 iwx_find_device_cfg(struct iwx_softc *sc)
10029 {
10030 uint16_t sdev_id, mac_type, rf_type;
10031 uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10032 int i;
10033
10034 sdev_id = pci_get_subdevice(sc->sc_dev);
10035 mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10036 mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10037 rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10038 cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10039 jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10040
10041 rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10042 no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10043 cores = IWX_SUBDEVICE_CORES(sdev_id);
10044
10045 for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10046 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10047
10048 if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10049 dev_info->device != sc->sc_pid)
10050 continue;
10051
10052 if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10053 dev_info->subdevice != sdev_id)
10054 continue;
10055
10056 if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10057 dev_info->mac_type != mac_type)
10058 continue;
10059
10060 if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10061 dev_info->mac_step != mac_step)
10062 continue;
10063
10064 if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10065 dev_info->rf_type != rf_type)
10066 continue;
10067
10068 if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10069 dev_info->cdb != cdb)
10070 continue;
10071
10072 if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10073 dev_info->jacket != jacket)
10074 continue;
10075
10076 if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10077 dev_info->rf_id != rf_id)
10078 continue;
10079
10080 if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10081 dev_info->no_160 != no_160)
10082 continue;
10083
10084 if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10085 dev_info->cores != cores)
10086 continue;
10087
10088 return dev_info->cfg;
10089 }
10090
10091 return NULL;
10092 }
10093
10094 static int
10095 iwx_probe(device_t dev)
10096 {
10097 int i;
10098
10099 for (i = 0; i < nitems(iwx_devices); i++) {
10100 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10101 pci_get_device(dev) == iwx_devices[i].device) {
10102 device_set_desc(dev, iwx_devices[i].name);
10103
10104 /*
10105 * Due to significant existing deployments using
10106 * iwlwifi lower the priority of iwx.
10107 *
10108 * This inverts the advice in bus.h where drivers
10109 * supporting newer hardware should return
10110 * BUS_PROBE_DEFAULT and drivers for older devices
10111 * return BUS_PROBE_LOW_PRIORITY.
10112 *
10113 */
10114 return (BUS_PROBE_LOW_PRIORITY);
10115 }
10116 }
10117
10118 return (ENXIO);
10119 }
10120
10121 static int
10122 iwx_attach(device_t dev)
10123 {
10124 struct iwx_softc *sc = device_get_softc(dev);
10125 struct ieee80211com *ic = &sc->sc_ic;
10126 const struct iwx_device_cfg *cfg;
10127 int err;
10128 int txq_i, i, j;
10129 size_t ctxt_info_size;
10130 int rid;
10131 int count;
10132 int error;
10133 sc->sc_dev = dev;
10134 sc->sc_pid = pci_get_device(dev);
10135 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
10136
10137 TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
10138 IWX_LOCK_INIT(sc);
10139 mbufq_init(&sc->sc_snd, ifqmaxlen);
10140 TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
10141 TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
10142 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
10143 taskqueue_thread_enqueue, &sc->sc_tq);
10144 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
10145 if (error != 0) {
10146 device_printf(dev, "can't start taskq thread, error %d\n",
10147 error);
10148 return (ENXIO);
10149 }
10150
10151 pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
10152 if (sc->sc_cap_off == 0) {
10153 device_printf(dev, "PCIe capability structure not found!\n");
10154 return (ENXIO);
10155 }
10156
10157 /*
10158 * We disable the RETRY_TIMEOUT register (0x41) to keep
10159 * PCI Tx retries from interfering with C3 CPU state.
10160 */
10161 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10162
10163 if (pci_msix_count(dev)) {
10164 sc->sc_msix = 1;
10165 } else {
10166 device_printf(dev, "no MSI-X found\n");
10167 return (ENXIO);
10168 }
10169
10170 pci_enable_busmaster(dev);
10171 rid = PCIR_BAR(0);
10172 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
10173 RF_ACTIVE);
10174 if (sc->sc_mem == NULL) {
10175 device_printf(sc->sc_dev, "can't map mem space\n");
10176 return (ENXIO);
10177 }
10178 sc->sc_st = rman_get_bustag(sc->sc_mem);
10179 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
10180
10181 count = 1;
10182 rid = 0;
10183 if (pci_alloc_msix(dev, &count) == 0)
10184 rid = 1;
10185 DPRINTF(("%s: count=%d\n", __func__, count));
10186 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
10187 (rid != 0 ? 0 : RF_SHAREABLE));
10188 if (sc->sc_irq == NULL) {
10189 device_printf(dev, "can't map interrupt\n");
10190 return (ENXIO);
10191 }
10192 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
10193 NULL, iwx_intr_msix, sc, &sc->sc_ih);
10194 if (error != 0) {
10195 device_printf(dev, "can't establish interrupt\n");
10196 return (ENXIO);
10197 }
10198
10199 /* Clear pending interrupts. */
10200 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10201 IWX_WRITE(sc, IWX_CSR_INT, ~0);
10202 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
10203
10204 sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
10205 DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
10206 sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
10207 DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));
10208
10209 /*
10210 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
10211 * changed, and now the revision step also includes bit 0-1 (no more
10212 * "dash" value). To keep hw_rev backwards compatible - we'll store it
10213 * in the old format.
10214 */
10215 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10216 (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10217
10218 switch (sc->sc_pid) {
10219 case PCI_PRODUCT_INTEL_WL_22500_1:
10220 sc->sc_fwname = IWX_CC_A_FW;
10221 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10222 sc->sc_integrated = 0;
10223 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10224 sc->sc_low_latency_xtal = 0;
10225 sc->sc_xtal_latency = 0;
10226 sc->sc_tx_with_siso_diversity = 0;
10227 sc->sc_uhb_supported = 0;
10228 break;
10229 case PCI_PRODUCT_INTEL_WL_22500_2:
10230 case PCI_PRODUCT_INTEL_WL_22500_5:
10231 /* These devices should be QuZ only. */
10232 if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
10233 device_printf(dev, "unsupported AX201 adapter\n");
10234 return (ENXIO);
10235 }
10236 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10237 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10238 sc->sc_integrated = 1;
10239 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10240 sc->sc_low_latency_xtal = 0;
10241 sc->sc_xtal_latency = 500;
10242 sc->sc_tx_with_siso_diversity = 0;
10243 sc->sc_uhb_supported = 0;
10244 break;
10245 case PCI_PRODUCT_INTEL_WL_22500_3:
10246 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10247 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10248 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10249 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10250 else
10251 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10252 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10253 sc->sc_integrated = 1;
10254 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10255 sc->sc_low_latency_xtal = 0;
10256 sc->sc_xtal_latency = 500;
10257 sc->sc_tx_with_siso_diversity = 0;
10258 sc->sc_uhb_supported = 0;
10259 break;
10260 case PCI_PRODUCT_INTEL_WL_22500_4:
10261 case PCI_PRODUCT_INTEL_WL_22500_7:
10262 case PCI_PRODUCT_INTEL_WL_22500_8:
10263 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10264 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10265 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10266 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10267 else
10268 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10269 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10270 sc->sc_integrated = 1;
10271 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
10272 sc->sc_low_latency_xtal = 0;
10273 sc->sc_xtal_latency = 1820;
10274 sc->sc_tx_with_siso_diversity = 0;
10275 sc->sc_uhb_supported = 0;
10276 break;
10277 case PCI_PRODUCT_INTEL_WL_22500_6:
10278 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10279 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10280 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10281 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10282 else
10283 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10284 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10285 sc->sc_integrated = 1;
10286 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10287 sc->sc_low_latency_xtal = 1;
10288 sc->sc_xtal_latency = 12000;
10289 sc->sc_tx_with_siso_diversity = 0;
10290 sc->sc_uhb_supported = 0;
10291 break;
10292 case PCI_PRODUCT_INTEL_WL_22500_9:
10293 case PCI_PRODUCT_INTEL_WL_22500_10:
10294 case PCI_PRODUCT_INTEL_WL_22500_11:
10295 case PCI_PRODUCT_INTEL_WL_22500_13:
10296 /* _14 is an MA device, not yet supported */
10297 case PCI_PRODUCT_INTEL_WL_22500_15:
10298 case PCI_PRODUCT_INTEL_WL_22500_16:
10299 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10300 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10301 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10302 sc->sc_integrated = 0;
10303 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10304 sc->sc_low_latency_xtal = 0;
10305 sc->sc_xtal_latency = 0;
10306 sc->sc_tx_with_siso_diversity = 0;
10307 sc->sc_uhb_supported = 1;
10308 break;
10309 case PCI_PRODUCT_INTEL_WL_22500_12:
10310 case PCI_PRODUCT_INTEL_WL_22500_17:
10311 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10312 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10313 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10314 sc->sc_integrated = 1;
10315 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10316 sc->sc_low_latency_xtal = 1;
10317 sc->sc_xtal_latency = 12000;
10318 sc->sc_tx_with_siso_diversity = 0;
10319 sc->sc_uhb_supported = 0;
10320 sc->sc_imr_enabled = 1;
10321 break;
10322 default:
10323 device_printf(dev, "unknown adapter type\n");
10324 return (ENXIO);
10325 }
10326
10327 cfg = iwx_find_device_cfg(sc);
10328 DPRINTF(("%s: cfg=%p\n", __func__, cfg));
10329 if (cfg) {
10330 sc->sc_fwname = cfg->fw_name;
10331 sc->sc_pnvm_name = cfg->pnvm_name;
10332 sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
10333 sc->sc_uhb_supported = cfg->uhb_supported;
10334 if (cfg->xtal_latency) {
10335 sc->sc_xtal_latency = cfg->xtal_latency;
10336 sc->sc_low_latency_xtal = cfg->low_latency_xtal;
10337 }
10338 }
10339
10340 sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
10341
10342 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10343 sc->sc_umac_prph_offset = 0x300000;
10344 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
10345 } else
10346 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
10347
10348 /* Allocate DMA memory for loading firmware. */
10349 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10350 ctxt_info_size = sizeof(struct iwx_context_info_gen3);
10351 else
10352 ctxt_info_size = sizeof(struct iwx_context_info);
10353 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
10354 ctxt_info_size, 1);
10355 if (err) {
10356 device_printf(dev,
10357 "could not allocate memory for loading firmware\n");
10358 return (ENXIO);
10359 }
10360
10361 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10362 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
10363 sizeof(struct iwx_prph_scratch), 1);
10364 if (err) {
10365 device_printf(dev,
10366 "could not allocate prph scratch memory\n");
10367 goto fail1;
10368 }
10369
10370 /*
10371 * Allocate prph information. The driver doesn't use this.
10372 * We use the second half of this page to give the device
10373 * some dummy TR/CR tail pointers - which shouldn't be
10374 * necessary as we don't use this, but the hardware still
10375 * reads/writes there and we can't let it go do that with
10376 * a NULL pointer.
10377 */
10378 KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
10379 ("iwx_prph_info has wrong size"));
10380 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
10381 PAGE_SIZE, 1);
10382 if (err) {
10383 device_printf(dev,
10384 "could not allocate prph info memory\n");
10385 goto fail1;
10386 }
10387 }
10388
10389 /* Allocate interrupt cause table (ICT).*/
10390 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10391 IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
10392 if (err) {
10393 device_printf(dev, "could not allocate ICT table\n");
10394 goto fail1;
10395 }
10396
10397 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10398 err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10399 if (err) {
10400 device_printf(dev, "could not allocate TX ring %d\n",
10401 txq_i);
10402 goto fail4;
10403 }
10404 }
10405
10406 err = iwx_alloc_rx_ring(sc, &sc->rxq);
10407 if (err) {
10408 device_printf(sc->sc_dev, "could not allocate RX ring\n");
10409 goto fail4;
10410 }
10411
10412 #ifdef IWX_DEBUG
10413 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10414 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
10415 CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
10416
10417 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10418 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
10419 CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
10420 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10421 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
10422 CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
10423
10424 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10425 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
10426 CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
10427
10428 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10429 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
10430 CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
10431 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10432 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
10433 CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
10434 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10435 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
10436 CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
10437 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10438 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
10439 CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
10440 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10441 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
10442 CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
10443 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10444 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
10445 CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
10446 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10447 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
10448 CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
10449 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10450 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
10451 CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
10452 #endif
10453 ic->ic_softc = sc;
10454 ic->ic_name = device_get_nameunit(sc->sc_dev);
10455 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
10456 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
10457
10458 /* Set device capabilities. */
10459 ic->ic_caps =
10460 IEEE80211_C_STA |
10461 IEEE80211_C_MONITOR |
10462 IEEE80211_C_WPA | /* WPA/RSN */
10463 IEEE80211_C_WME |
10464 IEEE80211_C_PMGT |
10465 IEEE80211_C_SHSLOT | /* short slot time supported */
10466 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
10467 IEEE80211_C_BGSCAN /* capable of bg scanning */
10468 ;
10469 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
10470
10471 ic->ic_txstream = 2;
10472 ic->ic_rxstream = 2;
10473 ic->ic_htcaps |= IEEE80211_HTC_HT
10474 | IEEE80211_HTCAP_SMPS_OFF
10475 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
10476 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
10477 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
10478 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
10479 // | IEEE80211_HTC_RX_AMSDU_AMPDU /* TODO: hw reorder */
10480 | IEEE80211_HTCAP_MAXAMSDU_3839; /* max A-MSDU length */
10481
10482 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
10483
10484 /*
10485 * XXX: setupcurchan() expects vhtcaps to be non-zero
10486 * https://bugs.freebsd.org/274156
10487 */
10488 ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
10489 | IEEE80211_VHTCAP_SHORT_GI_80
10490 | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
10491 | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
10492 | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
10493
10494 ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
10495 int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
10496 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
10497 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
10498 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
10499 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
10500 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
10501 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
10502 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
10503 ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
10504 ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
10505
10506 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
10507 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10508 struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10509 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10510 rxba->sc = sc;
10511 for (j = 0; j < nitems(rxba->entries); j++)
10512 mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
10513 }
10514
10515 sc->sc_preinit_hook.ich_func = iwx_attach_hook;
10516 sc->sc_preinit_hook.ich_arg = sc;
10517 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
10518 device_printf(dev,
10519 "config_intrhook_establish failed\n");
10520 goto fail4;
10521 }
10522
10523 return (0);
10524
10525 fail4:
10526 while (--txq_i >= 0)
10527 iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10528 iwx_free_rx_ring(sc, &sc->rxq);
10529 if (sc->ict_dma.vaddr != NULL)
10530 iwx_dma_contig_free(&sc->ict_dma);
10531
10532 fail1:
10533 iwx_dma_contig_free(&sc->ctxt_info_dma);
10534 iwx_dma_contig_free(&sc->prph_scratch_dma);
10535 iwx_dma_contig_free(&sc->prph_info_dma);
10536 return (ENXIO);
10537 }
10538
/*
 * Newbus detach: quiesce the hardware, then release resources in the
 * reverse order of iwx_attach() (ordering is deliberate and must not
 * be shuffled: stop device, drain deferred work, detach net80211,
 * then free rings, firmware references and bus resources).
 */
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	/* Stop the hardware before tearing anything down. */
	iwx_stop_device(sc);

	/* Ensure no deferred work remains in flight. */
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	/* Detach from net80211. */
	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	/* Release TX/RX DMA rings. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Drop firmware(9) image references. */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	/* Release interrupt, MSI vectors, and the BAR0 mapping. */
	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10582
10583 static void
10584 iwx_radiotap_attach(struct iwx_softc *sc)
10585 {
10586 struct ieee80211com *ic = &sc->sc_ic;
10587
10588 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10589 "->%s begin\n", __func__);
10590
10591 ieee80211_radiotap_attach(ic,
10592 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10593 IWX_TX_RADIOTAP_PRESENT,
10594 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10595 IWX_RX_RADIOTAP_PRESENT);
10596
10597 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10598 "->%s end\n", __func__);
10599 }
10600
10601 struct ieee80211vap *
10602 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10603 enum ieee80211_opmode opmode, int flags,
10604 const uint8_t bssid[IEEE80211_ADDR_LEN],
10605 const uint8_t mac[IEEE80211_ADDR_LEN])
10606 {
10607 struct iwx_vap *ivp;
10608 struct ieee80211vap *vap;
10609
10610 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
10611 return NULL;
10612 ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10613 vap = &ivp->iv_vap;
10614 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10615 vap->iv_bmissthreshold = 10; /* override default */
10616 /* Override with driver methods. */
10617 ivp->iv_newstate = vap->iv_newstate;
10618 vap->iv_newstate = iwx_newstate;
10619
10620 ivp->id = IWX_DEFAULT_MACID;
10621 ivp->color = IWX_DEFAULT_COLOR;
10622
10623 ivp->have_wme = TRUE;
10624 ivp->ps_disabled = FALSE;
10625
10626 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10627 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10628
10629 /* h/w crypto support */
10630 vap->iv_key_alloc = iwx_key_alloc;
10631 vap->iv_key_delete = iwx_key_delete;
10632 vap->iv_key_set = iwx_key_set;
10633 vap->iv_key_update_begin = iwx_key_update_begin;
10634 vap->iv_key_update_end = iwx_key_update_end;
10635
10636 ieee80211_ratectl_init(vap);
10637 /* Complete setup. */
10638 ieee80211_vap_attach(vap, ieee80211_media_change,
10639 ieee80211_media_status, mac);
10640 ic->ic_opmode = opmode;
10641
10642 return vap;
10643 }
10644
10645 static void
10646 iwx_vap_delete(struct ieee80211vap *vap)
10647 {
10648 struct iwx_vap *ivp = IWX_VAP(vap);
10649
10650 ieee80211_ratectl_deinit(vap);
10651 ieee80211_vap_detach(vap);
10652 free(ivp, M_80211_VAP);
10653 }
10654
10655 static void
10656 iwx_parent(struct ieee80211com *ic)
10657 {
10658 struct iwx_softc *sc = ic->ic_softc;
10659 IWX_LOCK(sc);
10660
10661 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10662 iwx_stop(sc);
10663 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10664 } else {
10665 iwx_init(sc);
10666 ieee80211_start_all(ic);
10667 }
10668 IWX_UNLOCK(sc);
10669 }
10670
10671 static int
10672 iwx_suspend(device_t dev)
10673 {
10674 struct iwx_softc *sc = device_get_softc(dev);
10675 struct ieee80211com *ic = &sc->sc_ic;
10676
10677 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10678 ieee80211_suspend_all(ic);
10679
10680 iwx_stop(sc);
10681 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10682 }
10683 return (0);
10684 }
10685
10686 static int
10687 iwx_resume(device_t dev)
10688 {
10689 struct iwx_softc *sc = device_get_softc(dev);
10690 struct ieee80211com *ic = &sc->sc_ic;
10691 int err;
10692
10693 /*
10694 * We disable the RETRY_TIMEOUT register (0x41) to keep
10695 * PCI Tx retries from interfering with C3 CPU state.
10696 */
10697 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10698
10699 IWX_LOCK(sc);
10700
10701 err = iwx_init(sc);
10702 if (err) {
10703 iwx_stop_device(sc);
10704 IWX_UNLOCK(sc);
10705 return err;
10706 }
10707
10708 IWX_UNLOCK(sc);
10709
10710 ieee80211_resume_all(ic);
10711 return (0);
10712 }
10713
10714 static void
10715 iwx_scan_start(struct ieee80211com *ic)
10716 {
10717 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10718 struct iwx_softc *sc = ic->ic_softc;
10719 int err;
10720
10721 IWX_LOCK(sc);
10722 if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10723 err = iwx_scan(sc);
10724 else
10725 err = iwx_bgscan(ic);
10726 IWX_UNLOCK(sc);
10727 if (err)
10728 ieee80211_cancel_scan(vap);
10729
10730 return;
10731 }
10732
static void
iwx_update_mcast(struct ieee80211com *ic)
{
	/* Multicast filter programming is not implemented; no-op. */
}
10737
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	/* No-op: the firmware drives per-channel dwell during scans. */
}
10742
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* No-op: minimum dwell handling is left to the firmware scan. */
}
10747
/* net80211 ic_scan_end hook: finalize the firmware scan via iwx_endscan(). */
static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}
10753
static void
iwx_set_channel(struct ieee80211com *ic)
{
	/*
	 * Intentional no-op; presumably channel changes are applied via
	 * PHY context updates elsewhere in the driver — TODO confirm.
	 * The disabled code below sketches a direct implementation.
	 */
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}
10765
10766 static void
10767 iwx_endscan_cb(void *arg, int pending)
10768 {
10769 struct iwx_softc *sc = arg;
10770 struct ieee80211com *ic = &sc->sc_ic;
10771
10772 DPRINTF(("scan ended\n"));
10773 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10774 }
10775
static int
iwx_wme_update(struct ieee80211com *ic)
{
	/* TODO: push updated EDCA/WME parameters to firmware. */
	return 0;
}
10781
10782 static int
10783 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10784 const struct ieee80211_bpf_params *params)
10785 {
10786 struct ieee80211com *ic = ni->ni_ic;
10787 struct iwx_softc *sc = ic->ic_softc;
10788 int err;
10789
10790 IWX_LOCK(sc);
10791 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10792 err = iwx_tx(sc, m, ni);
10793 IWX_UNLOCK(sc);
10794 return err;
10795 } else {
10796 IWX_UNLOCK(sc);
10797 return EIO;
10798 }
10799 }
10800
10801 static int
10802 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10803 {
10804 struct iwx_softc *sc = ic->ic_softc;
10805 int error;
10806
10807 // TODO: mbufq_enqueue in iwm
10808 // TODO dequeue in iwm_start, counters, locking
10809 IWX_LOCK(sc);
10810 error = mbufq_enqueue(&sc->sc_snd, m);
10811 if (error) {
10812 IWX_UNLOCK(sc);
10813 return (error);
10814 }
10815
10816 iwx_start(sc);
10817 IWX_UNLOCK(sc);
10818 return (0);
10819 }
10820
10821 static int
10822 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10823 int baparamset, int batimeout, int baseqctl)
10824 {
10825 struct ieee80211com *ic = ni->ni_ic;
10826 struct iwx_softc *sc = ic->ic_softc;
10827 int tid;
10828
10829 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10830 sc->ni_rx_ba[tid].ba_winstart =
10831 _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10832 sc->ni_rx_ba[tid].ba_winsize =
10833 _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10834 sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10835
10836 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10837 tid >= IWX_MAX_TID_COUNT)
10838 return ENOSPC;
10839
10840 if (sc->ba_rx.start_tidmask & (1 << tid)) {
10841 DPRINTF(("%s: tid %d already added\n", __func__, tid));
10842 return EBUSY;
10843 }
10844 DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10845
10846 sc->ba_rx.start_tidmask |= (1 << tid);
10847 DPRINTF(("%s: tid=%i\n", __func__, tid));
10848 DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10849 DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10850 DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10851
10852 taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10853
10854 // TODO:misha move to ba_task (serialize)
10855 sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10856
10857 return (0);
10858 }
10859
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	/* TODO: tear down the firmware Rx BA session started in
	 * iwx_ampdu_rx_start(); currently a no-op. */
	return;
}
10865
10866 static int
10867 iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
10868 int dialogtoken, int baparamset, int batimeout)
10869 {
10870 struct iwx_softc *sc = ni->ni_ic->ic_softc;
10871 int tid;
10872
10873 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10874 DPRINTF(("%s: tid=%i\n", __func__, tid));
10875 sc->ba_tx.start_tidmask |= (1 << tid);
10876 taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
10877 return 0;
10878 }
10879
10880
/* net80211 ic_addba_response hook: no driver action needed; report success. */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return 0;
}
10887
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	/* No serialization needed around key updates; no-op. */
	return;
}
10893
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	/* Counterpart of iwx_key_update_begin(); no-op. */
	return;
}
10899
/*
 * net80211 iv_key_alloc hook.
 *
 * CCMP keys are installed into the device by iwx_key_set(), so no
 * key-table slot is chosen here and success (1) is returned at once.
 * NOTE(review): on that early-return path *keyix/*rxkeyix are left
 * unmodified — confirm net80211 initializes them before the call.
 * Other ciphers follow the net80211 default allocation policy below.
 */
static int
iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{

	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
		return 1;
	}
	/* Pointer-range test: is 'k' one of the vap's global (WEP-slot) keys? */
	if (!(&vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
		/*
		 * Not in the global key table, the driver should handle this
		 * by allocating a slot in the h/w key table/cache. In
		 * lieu of that return key slot 0 for any unicast key
		 * request. We disallow the request if this is a group key.
		 * This default policy does the right thing for legacy hardware
		 * with a 4 key table. It also handles devices that pass
		 * packets through untouched when marked with the WEP bit
		 * and key index 0.
		 */
		if (k->wk_flags & IEEE80211_KEY_GROUP)
			return 0;
		*keyix = 0; /* NB: use key index 0 for ucast key */
	} else {
		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
	}
	*rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
	return 1;
}
10929
10930 static int
10931 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
10932 {
10933 struct ieee80211com *ic = vap->iv_ic;
10934 struct iwx_softc *sc = ic->ic_softc;
10935 struct iwx_add_sta_key_cmd cmd;
10936 uint32_t status;
10937 int err;
10938 int id;
10939
10940 if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
10941 return 1;
10942 }
10943
10944 IWX_LOCK(sc);
10945 /*
10946 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
10947 * Currently we only implement station mode where 'ni' is always
10948 * ic->ic_bss so there is no need to validate arguments beyond this:
10949 */
10950
10951 memset(&cmd, 0, sizeof(cmd));
10952
10953 if (k->wk_flags & IEEE80211_KEY_GROUP) {
10954 DPRINTF(("%s: adding group key\n", __func__));
10955 } else {
10956 DPRINTF(("%s: adding key\n", __func__));
10957 }
10958 if (k >= &vap->iv_nw_keys[0] &&
10959 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
10960 id = (k - vap->iv_nw_keys);
10961 else
10962 id = (0);
10963 DPRINTF(("%s: setting keyid=%i\n", __func__, id));
10964 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
10965 IWX_STA_KEY_FLG_WEP_KEY_MAP |
10966 ((id << IWX_STA_KEY_FLG_KEYID_POS) &
10967 IWX_STA_KEY_FLG_KEYID_MSK));
10968 if (k->wk_flags & IEEE80211_KEY_GROUP) {
10969 cmd.common.key_offset = 1;
10970 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
10971 } else {
10972 cmd.common.key_offset = 0;
10973 }
10974 memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
10975 k->wk_keylen));
10976 DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
10977 for (int i=0; i<k->wk_keylen; i++) {
10978 DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
10979 }
10980 cmd.common.sta_id = IWX_STATION_ID;
10981
10982 cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
10983 DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));
10984
10985 status = IWX_ADD_STA_SUCCESS;
10986 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
10987 &status);
10988 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
10989 err = EIO;
10990 if (err) {
10991 printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
10992 IWX_UNLOCK(sc);
10993 return err;
10994 } else
10995 DPRINTF(("%s: key added successfully\n", __func__));
10996 IWX_UNLOCK(sc);
10997 return 1;
10998 }
10999
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/*
	 * TODO: remove the key from the firmware station key table.
	 * Returning 1 tells net80211 the delete succeeded.
	 */
	return 1;
}
11005
/* Newbus glue: device methods for the iwx(4) PCI driver. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, iwx_probe),
	DEVMETHOD(device_attach, iwx_attach),
	DEVMETHOD(device_detach, iwx_detach),
	DEVMETHOD(device_suspend, iwx_suspend),
	DEVMETHOD(device_resume, iwx_resume),

	DEVMETHOD_END
};

static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};

DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
/* PNP table: autoload on Intel (vendor 0x8086) IDs from iwx_devices. */
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
/* Module dependencies: firmware(9) loader, PCI bus, net80211 stack. */
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11029