1 /*-
2 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3 */
4
5 /* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
6
7 /*
8 *
9 * Copyright (c) 2025 The FreeBSD Foundation
10 *
11 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12 * under sponsorship from the FreeBSD Foundation.
13 *
14 * Permission to use, copy, modify, and distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
26 */
27
28 /*-
29 * Copyright (c) 2024 Future Crew, LLC
30 * Author: Mikhail Pchelin <misha@FreeBSD.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45 /*
46 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47 * Author: Stefan Sperling <stsp@openbsd.org>
48 * Copyright (c) 2014 Fixup Software Ltd.
49 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50 *
51 * Permission to use, copy, modify, and distribute this software for any
52 * purpose with or without fee is hereby granted, provided that the above
53 * copyright notice and this permission notice appear in all copies.
54 *
55 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62 */
63
64 /*-
65 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66 * which were used as the reference documentation for this implementation.
67 *
68 ******************************************************************************
69 *
70 * This file is provided under a dual BSD/GPLv2 license. When using or
71 * redistributing this file, you may do so under either license.
72 *
73 * GPL LICENSE SUMMARY
74 *
75 * Copyright(c) 2017 Intel Deutschland GmbH
76 * Copyright(c) 2018 - 2019 Intel Corporation
77 *
78 * This program is free software; you can redistribute it and/or modify
79 * it under the terms of version 2 of the GNU General Public License as
80 * published by the Free Software Foundation.
81 *
82 * This program is distributed in the hope that it will be useful, but
83 * WITHOUT ANY WARRANTY; without even the implied warranty of
84 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
85 * General Public License for more details.
86 *
87 * BSD LICENSE
88 *
89 * Copyright(c) 2017 Intel Deutschland GmbH
90 * Copyright(c) 2018 - 2019 Intel Corporation
91 * All rights reserved.
92 *
93 * Redistribution and use in source and binary forms, with or without
94 * modification, are permitted provided that the following conditions
95 * are met:
96 *
97 * * Redistributions of source code must retain the above copyright
98 * notice, this list of conditions and the following disclaimer.
99 * * Redistributions in binary form must reproduce the above copyright
100 * notice, this list of conditions and the following disclaimer in
101 * the documentation and/or other materials provided with the
102 * distribution.
103 * * Neither the name Intel Corporation nor the names of its
104 * contributors may be used to endorse or promote products derived
105 * from this software without specific prior written permission.
106 *
107 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118 *
119 *****************************************************************************
120 */
121
122 /*-
123 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124 *
125 * Permission to use, copy, modify, and distribute this software for any
126 * purpose with or without fee is hereby granted, provided that the above
127 * copyright notice and this permission notice appear in all copies.
128 *
129 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136 */
137
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180
/*
 * Tx queue software flow-control watermarks (frame counts): transmit is
 * throttled above iwx_himark and resumed once the queue drains to iwx_lomark.
 * NOTE(review): semantics inferred from names — confirm against the users of
 * these variables elsewhere in the file.
 */
int iwx_himark = 224;
int iwx_lomark = 192;
183
184 #define IWX_FBSD_RSP_V3 3
185 #define IWX_FBSD_RSP_V4 4
186
187 #define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
188 #define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)
189
190 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
191 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195
196 #include <dev/iwx/if_iwx_debug.h>
197
198 #define PCI_CFG_RETRY_TIMEOUT 0x41
199
200 #define PCI_VENDOR_INTEL 0x8086
201 #define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
202 #define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
203 #define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
204 #define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
205 #define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
206 #define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
207 #define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
208 #define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
209 #define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
210 #define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
211 #define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
212 #define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
213 #define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
214 #define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
215 #define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
216 #define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
217 #define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
218
/* Supported PCI device IDs mapped to their marketing names. */
static const struct iwx_devices {
	uint16_t device;	/* PCI device ID */
	char *name;		/* human-readable adapter name */
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1,		"Wi-Fi 6 AX200" },
	{ PCI_PRODUCT_INTEL_WL_22500_2,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_3,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_4,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_5,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_6,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_7,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_8,		"Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_9,		"Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_10,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_11,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_12,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_13,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_14,	"Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_15,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_16,	"Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_17,	"Wi-Fi 6 AX211" },
};
241
/* Channel numbers advertised by non-UHB NVM (2.4 and 5 GHz bands only). */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
250
/*
 * Channel numbers advertised by ultra-high-band capable NVM; extends the
 * non-UHB table with the 6-7 GHz channel list.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
264
265 #define IWX_NUM_2GHZ_CHANNELS 14
266 #define IWX_NUM_5GHZ_CHANNELS 37
267
/*
 * Rate table: rate in units of 500 kbit/s alongside its legacy PLCP code
 * and its HT PLCP code (IWX_RATE_HT_SISO_MCS_INV_PLCP / IWX_RATE_INVM_PLCP
 * mark entries with no counterpart in that modulation family).
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kbit/s units */
	uint8_t plcp;		/* legacy PLCP signal code */
	uint8_t ht_plcp;	/* HT MCS PLCP code */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
296 #define IWX_RIDX_CCK 0
297 #define IWX_RIDX_OFDM 4
298 #define IWX_RIDX_MAX (nitems(iwx_rates)-1)
299 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
300 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
301 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
302
/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	/* MCS 8-15 are MIMO2 rates. */
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
322
323 static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
324 static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
325 static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
326 #if 0
327 static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
328 static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
329 #endif
330 static int iwx_apply_debug_destination(struct iwx_softc *);
331 static void iwx_set_ltr(struct iwx_softc *);
332 static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
333 static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
334 const struct iwx_fw_sects *);
335 static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
336 static void iwx_ctxt_info_free_paging(struct iwx_softc *);
337 static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
338 struct iwx_context_info_dram *);
339 static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
340 static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
341 const uint8_t *, size_t);
342 static int iwx_set_default_calib(struct iwx_softc *, const void *);
343 static void iwx_fw_info_free(struct iwx_fw_info *);
344 static int iwx_read_firmware(struct iwx_softc *);
345 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
346 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
347 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
348 static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
349 static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
351 static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
352 static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
353 static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
354 static int iwx_nic_lock(struct iwx_softc *);
355 static void iwx_nic_assert_locked(struct iwx_softc *);
356 static void iwx_nic_unlock(struct iwx_softc *);
357 static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
358 uint32_t);
359 static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
360 static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
361 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
362 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
363 bus_size_t, bus_size_t);
364 static void iwx_dma_contig_free(struct iwx_dma_info *);
365 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void iwx_disable_rx_dma(struct iwx_softc *);
367 static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
368 static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
369 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
370 static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
371 static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
372 static void iwx_enable_rfkill_int(struct iwx_softc *);
373 static int iwx_check_rfkill(struct iwx_softc *);
374 static void iwx_enable_interrupts(struct iwx_softc *);
375 static void iwx_enable_fwload_interrupt(struct iwx_softc *);
376 #if 0
377 static void iwx_restore_interrupts(struct iwx_softc *);
378 #endif
379 static void iwx_disable_interrupts(struct iwx_softc *);
380 static void iwx_ict_reset(struct iwx_softc *);
381 static int iwx_set_hw_ready(struct iwx_softc *);
382 static int iwx_prepare_card_hw(struct iwx_softc *);
383 static int iwx_force_power_gating(struct iwx_softc *);
384 static void iwx_apm_config(struct iwx_softc *);
385 static int iwx_apm_init(struct iwx_softc *);
386 static void iwx_apm_stop(struct iwx_softc *);
387 static int iwx_allow_mcast(struct iwx_softc *);
388 static void iwx_init_msix_hw(struct iwx_softc *);
389 static void iwx_conf_msix_hw(struct iwx_softc *, int);
390 static int iwx_clear_persistence_bit(struct iwx_softc *);
391 static int iwx_start_hw(struct iwx_softc *);
392 static void iwx_stop_device(struct iwx_softc *);
393 static void iwx_nic_config(struct iwx_softc *);
394 static int iwx_nic_rx_init(struct iwx_softc *);
395 static int iwx_nic_init(struct iwx_softc *);
396 static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
397 static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
398 static void iwx_post_alive(struct iwx_softc *);
399 static int iwx_schedule_session_protection(struct iwx_softc *,
400 struct iwx_node *, uint32_t);
401 static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
402 static void iwx_init_channel_map(struct ieee80211com *, int, int *,
403 struct ieee80211_channel[]);
404 static int iwx_mimo_enabled(struct iwx_softc *);
405 static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
406 uint16_t);
407 static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
408 static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
409 uint16_t, uint16_t, int, int);
410 static void iwx_sta_tx_agg_start(struct iwx_softc *,
411 struct ieee80211_node *, uint8_t);
412 static void iwx_ba_rx_task(void *, int);
413 static void iwx_ba_tx_task(void *, int);
414 static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
415 static int iwx_is_valid_mac_addr(const uint8_t *);
416 static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
417 static int iwx_nvm_get(struct iwx_softc *);
418 static int iwx_load_firmware(struct iwx_softc *);
419 static int iwx_start_fw(struct iwx_softc *);
420 static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
421 static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
422 static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
423 static int iwx_load_pnvm(struct iwx_softc *);
424 static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
425 static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
426 static int iwx_load_ucode_wait_alive(struct iwx_softc *);
427 static int iwx_send_dqa_cmd(struct iwx_softc *);
428 static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
429 static int iwx_config_ltr(struct iwx_softc *);
430 static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
431 static int iwx_rx_addbuf(struct iwx_softc *, int, int);
432 static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
433 static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
434 struct iwx_rx_data *);
435 static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
436 static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
437 #if 0
438 int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
439 struct ieee80211_node *, struct ieee80211_rxinfo *);
440 #endif
441 static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
442 int, int, uint32_t, uint8_t);
443 static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
444 static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
445 struct iwx_tx_data *);
446 static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
447 static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
448 struct iwx_rx_data *);
449 static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
450 static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
451 struct iwx_rx_data *);
452 static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
453 static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
454 static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
455 struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
456 #if 0
457 static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
458 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
459 #endif
460 static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
461 uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
462 static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
463 static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
464 const void *);
465 static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
466 uint32_t *);
467 static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
468 const void *, uint32_t *);
469 static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
470 static void iwx_cmd_done(struct iwx_softc *, int, int, int);
471 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
472 static uint32_t iwx_fw_rateidx_cck(uint8_t);
473 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
474 struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
475 struct mbuf *);
476 static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
477 uint16_t, uint16_t);
478 static int iwx_tx(struct iwx_softc *, struct mbuf *,
479 struct ieee80211_node *);
480 static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
481 static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
482 static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
483 static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
484 struct iwx_beacon_filter_cmd *);
485 static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
486 int);
487 static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
488 struct iwx_mac_power_cmd *);
489 static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
490 static int iwx_power_update_device(struct iwx_softc *);
491 #if 0
492 static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
493 #endif
494 static int iwx_disable_beacon_filter(struct iwx_softc *);
495 static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
496 static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
497 static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
498 static int iwx_fill_probe_req(struct iwx_softc *,
499 struct iwx_scan_probe_req *);
500 static int iwx_config_umac_scan_reduced(struct iwx_softc *);
501 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
502 static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
503 struct iwx_scan_general_params_v10 *, int);
504 static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
505 struct iwx_scan_general_params_v10 *, uint16_t, int);
506 static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
507 struct iwx_scan_channel_params_v6 *, uint32_t, int);
508 static int iwx_umac_scan_v14(struct iwx_softc *, int);
509 static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
510 static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
511 static int iwx_rval2ridx(int);
512 static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
513 int *);
514 static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
515 struct iwx_mac_ctx_cmd *, uint32_t);
516 static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
517 struct iwx_mac_data_sta *, int);
518 static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
519 uint32_t, int);
520 static int iwx_clear_statistics(struct iwx_softc *);
521 static int iwx_scan(struct iwx_softc *);
522 static int iwx_bgscan(struct ieee80211com *);
523 static int iwx_enable_mgmt_queue(struct iwx_softc *);
524 static int iwx_disable_mgmt_queue(struct iwx_softc *);
525 static int iwx_rs_rval2idx(uint8_t);
526 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
527 int);
528 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
529 static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
530 static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
531 static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
532 static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
533 uint8_t, uint8_t);
534 static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
535 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
536 uint8_t);
537 static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
538 static int iwx_deauth(struct iwx_softc *);
539 static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
540 static int iwx_run_stop(struct iwx_softc *);
541 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
542 const uint8_t[IEEE80211_ADDR_LEN]);
543 #if 0
544 int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
545 struct ieee80211_key *);
546 void iwx_setkey_task(void *);
547 void iwx_delete_key(struct ieee80211com *,
548 struct ieee80211_node *, struct ieee80211_key *);
549 #endif
550 static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
551 static void iwx_endscan(struct iwx_softc *);
552 static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
553 struct ieee80211_node *);
554 static int iwx_sf_config(struct iwx_softc *, int);
555 static int iwx_send_bt_init_conf(struct iwx_softc *);
556 static int iwx_send_soc_conf(struct iwx_softc *);
557 static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
558 static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
559 static int iwx_init_hw(struct iwx_softc *);
560 static int iwx_init(struct iwx_softc *);
561 static void iwx_stop(struct iwx_softc *);
562 static void iwx_watchdog(void *);
563 static const char *iwx_desc_lookup(uint32_t);
564 static void iwx_nic_error(struct iwx_softc *);
565 static void iwx_dump_driver_status(struct iwx_softc *);
566 static void iwx_nic_umac_error(struct iwx_softc *);
567 static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
568 static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
569 static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
570 struct mbuf *);
571 static void iwx_notif_intr(struct iwx_softc *);
572 #if 0
573 /* XXX-THJ - I don't have hardware for this */
574 static int iwx_intr(void *);
575 #endif
576 static void iwx_intr_msix(void *);
577 static int iwx_preinit(struct iwx_softc *);
578 static void iwx_attach_hook(void *);
579 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
580 static int iwx_probe(device_t);
581 static int iwx_attach(device_t);
582 static int iwx_detach(device_t);
583
584 /* FreeBSD specific glue */
/* Link-level broadcast address (all-ones), for OpenBSD-derived code paths. */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zero link-level address. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
590
591 #if IWX_DEBUG
592 #define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
593 #else
594 #define DPRINTF(x) do { ; } while (0)
595 #endif
596
597 /* FreeBSD specific functions */
598 static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
599 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
600 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
601 static void iwx_vap_delete(struct ieee80211vap *);
602 static void iwx_parent(struct ieee80211com *);
603 static void iwx_scan_start(struct ieee80211com *);
604 static void iwx_scan_end(struct ieee80211com *);
605 static void iwx_update_mcast(struct ieee80211com *ic);
606 static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
607 static void iwx_scan_mindwell(struct ieee80211_scan_state *);
608 static void iwx_set_channel(struct ieee80211com *);
609 static void iwx_endscan_cb(void *, int );
610 static int iwx_wme_update(struct ieee80211com *);
611 static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
612 const struct ieee80211_bpf_params *);
613 static int iwx_transmit(struct ieee80211com *, struct mbuf *);
614 static void iwx_start(struct iwx_softc *);
615 static int iwx_ampdu_rx_start(struct ieee80211_node *,
616 struct ieee80211_rx_ampdu *, int, int, int);
617 static void iwx_ampdu_rx_stop(struct ieee80211_node *,
618 struct ieee80211_rx_ampdu *);
619 static int iwx_addba_request(struct ieee80211_node *,
620 struct ieee80211_tx_ampdu *, int, int, int);
621 static int iwx_addba_response(struct ieee80211_node *,
622 struct ieee80211_tx_ampdu *, int, int, int);
623 static void iwx_key_update_begin(struct ieee80211vap *);
624 static void iwx_key_update_end(struct ieee80211vap *);
625 static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
626 ieee80211_keyix *,ieee80211_keyix *);
627 static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
628 static int iwx_key_delete(struct ieee80211vap *,
629 const struct ieee80211_key *);
630 static int iwx_suspend(device_t);
631 static int iwx_resume(device_t);
632 static void iwx_radiotap_attach(struct iwx_softc *);
633
634 /* OpenBSD compat defines */
635 #define IEEE80211_HTOP0_SCO_SCN 0
636 #define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
637 #define IEEE80211_VHTOP0_CHAN_WIDTH_80 1
638
639 #define IEEE80211_HT_RATESET_SISO 0
640 #define IEEE80211_HT_RATESET_MIMO2 2
641
/* Standard 802.11a/b/g rate sets; rates are in units of 500 kbit/s. */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
650
651 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)652 ieee80211_has_addr4(const struct ieee80211_frame *wh)
653 {
654 return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
655 IEEE80211_FC1_DIR_DSTODS;
656 }
657
658 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)659 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
660 {
661 const struct iwx_fw_cmd_version *entry;
662 int i;
663
664 for (i = 0; i < sc->n_cmd_versions; i++) {
665 entry = &sc->cmd_versions[i];
666 if (entry->group == grp && entry->cmd == cmd)
667 return entry->cmd_ver;
668 }
669
670 return IWX_FW_CMD_VER_UNKNOWN;
671 }
672
673 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)674 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
675 {
676 const struct iwx_fw_cmd_version *entry;
677 int i;
678
679 for (i = 0; i < sc->n_cmd_versions; i++) {
680 entry = &sc->cmd_versions[i];
681 if (entry->group == grp && entry->cmd == cmd)
682 return entry->notif_ver;
683 }
684
685 return IWX_FW_CMD_VER_UNKNOWN;
686 }
687
688 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)689 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
690 {
691 const struct iwx_fw_cscheme_list *l = (const void *)data;
692
693 if (dlen < sizeof(*l) ||
694 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
695 return EINVAL;
696
697 /* we don't actually store anything for now, always use s/w crypto */
698
699 return 0;
700 }
701
702 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)703 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
704 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
705 {
706 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
707 if (err) {
708 printf("%s: could not allocate context info DMA memory\n",
709 DEVNAME(sc));
710 return err;
711 }
712
713 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
714
715 return 0;
716 }
717
718 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)719 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
720 {
721 struct iwx_self_init_dram *dram = &sc->init_dram;
722 int i;
723
724 if (!dram->paging)
725 return;
726
727 /* free paging*/
728 for (i = 0; i < dram->paging_cnt; i++)
729 iwx_dma_contig_free(&dram->paging[i]);
730
731 free(dram->paging, M_DEVBUF);
732 dram->paging_cnt = 0;
733 dram->paging = NULL;
734 }
735
736 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)737 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
738 {
739 int i = 0;
740
741 while (start < fws->fw_count &&
742 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
743 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
744 start++;
745 i++;
746 }
747
748 return i;
749 }
750
/*
 * Allocate DMA memory for all lmac/umac/paging firmware sections and
 * record their physical addresses in the context-info DRAM structure
 * handed to the device.  The section list in 'fws' is laid out as:
 *   [lmac...][separator][umac...][separator][paging...]
 * On a mid-loop allocation failure the partially filled dram->fw /
 * dram->paging arrays are NOT unwound here; callers free them via
 * iwx_ctxt_info_free_fw_img() (and paging via iwx_ctxt_info_free_paging()).
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the softc lock across the (potentially sleeping) allocations. */
	IWX_UNLOCK(sc);
	/*
	 * NOTE(review): M_NOWAIT is used here even though the lock was just
	 * dropped, while the paging allocation below uses M_WAITOK --
	 * confirm whether the asymmetry is intentional.
	 */
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	/* With M_WAITOK this cannot return NULL; the check below is defensive. */
	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
848
/*
 * Format a firmware version triple as "major.minor.api" into 'buf'.
 * From major version 35 onwards the Linux driver prints the minor
 * number as zero-padded hexadecimal; match that convention so our
 * version strings line up with Intel's release notes.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	if (major < 35) {
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
		return;
	}

	snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
}
862 #if 0
863 static int
864 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
865 uint8_t min_power)
866 {
867 struct iwx_dma_info *fw_mon = &sc->fw_mon;
868 uint32_t size = 0;
869 uint8_t power;
870 int err;
871
872 if (fw_mon->size)
873 return 0;
874
875 for (power = max_power; power >= min_power; power--) {
876 size = (1 << power);
877
878 err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
879 if (err)
880 continue;
881
882 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
883 "%s: allocated 0x%08x bytes for firmware monitor.\n",
884 DEVNAME(sc), size);
885 break;
886 }
887
888 if (err) {
889 fw_mon->size = 0;
890 return err;
891 }
892
893 if (power != max_power)
894 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
895 "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
896 DEVNAME(sc), (unsigned long)(1 << (power - 10)),
897 (unsigned long)(1 << (max_power - 10)));
898
899 return 0;
900 }
901
902 static int
903 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
904 {
905 if (!max_power) {
906 /* default max_power is maximum */
907 max_power = 26;
908 } else {
909 max_power += 11;
910 }
911
912 if (max_power > 26) {
913 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
914 "%s: External buffer size for monitor is too big %d, "
915 "check the FW TLV\n", DEVNAME(sc), max_power);
916 return 0;
917 }
918
919 if (sc->fw_mon.size)
920 return 0;
921
922 return iwx_alloc_fw_monitor_block(sc, max_power, 11);
923 }
924 #endif
925
/*
 * Program the firmware debug destination (monitor buffer) described
 * by the FW_DBG_DEST TLV.  The real implementation is currently
 * compiled out (#if 0); the function is a stub returning success.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	/* External mode stores the monitor data in host DMA memory. */
	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Replay the TLV's register-operation list against the device. */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			/*
			 * NOTE(review): these error returns leave the NIC
			 * locked (no iwx_nic_unlock()) -- fix before
			 * re-enabling this code.
			 */
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

 monitor:
	/* Point the device at the allocated external monitor buffer. */
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1012
1013 static void
iwx_set_ltr(struct iwx_softc * sc)1014 iwx_set_ltr(struct iwx_softc *sc)
1015 {
1016 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1017 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1018 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1019 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1020 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1021 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1022 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1023 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1024 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1025 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1026 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1027
1028 /*
1029 * To workaround hardware latency issues during the boot process,
1030 * initialize the LTR to ~250 usec (see ltr_val above).
1031 * The firmware initializes this again later (to a smaller value).
1032 */
1033 if (!sc->sc_integrated) {
1034 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1035 } else if (sc->sc_integrated &&
1036 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1037 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1038 IWX_HPM_MAC_LRT_ENABLE_ALL);
1039 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1040 }
1041 }
1042
/*
 * Build the (pre-AX210) context info structure in DMA memory,
 * describing RX/TX ring addresses and firmware section locations,
 * then hand its address to the device and kick firmware self-load.
 * On any failure the firmware section DMA memory is released.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* The RB CB size must fit in its 4-bit field (see flags below). */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1123
/*
 * Build the gen3 (AX210+) boot structures: the PRPH scratch area,
 * the context info, and a DMA copy of the image loader (IML) blob
 * extracted from the firmware file.  Then point the device at them
 * and kick firmware self-load.  All DMA allocations made here are
 * released on failure.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 devices cannot boot without the IML from the fw file. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill the context info: ring base addresses and sizes, plus
	 * the head/tail index arrays living in the PRPH info page.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* 64-bit addresses are written as two 32-bit halves. */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1228
1229 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1230 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1231 {
1232 struct iwx_self_init_dram *dram = &sc->init_dram;
1233 int i;
1234
1235 if (!dram->fw)
1236 return;
1237
1238 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1239 iwx_dma_contig_free(&dram->fw[i]);
1240
1241 free(dram->fw, M_DEVBUF);
1242 dram->lmac_cnt = 0;
1243 dram->umac_cnt = 0;
1244 dram->fw = NULL;
1245 }
1246
1247 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1248 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1249 const uint8_t *data, size_t dlen)
1250 {
1251 struct iwx_fw_sects *fws;
1252 struct iwx_fw_onesect *fwone;
1253
1254 if (type >= IWX_UCODE_TYPE_MAX)
1255 return EINVAL;
1256 if (dlen < sizeof(uint32_t))
1257 return EINVAL;
1258
1259 fws = &sc->sc_fw.fw_sects[type];
1260 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1261 "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1262 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1263 return EINVAL;
1264
1265 fwone = &fws->fw_sect[fws->fw_count];
1266
1267 /* first 32bit are device load offset */
1268 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1269
1270 /* rest is data */
1271 fwone->fws_data = data + sizeof(uint32_t);
1272 fwone->fws_len = dlen - sizeof(uint32_t);
1273
1274 fws->fw_count++;
1275 fws->fw_totlen += fwone->fws_len;
1276
1277 return 0;
1278 }
1279
#define IWX_DEFAULT_SCAN_CHANNELS 40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */

/* Wire layout of the IWX_UCODE_TLV_DEF_CALIB firmware TLV payload. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;		/* ucode image the calibration targets */
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1288
1289 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1290 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1291 {
1292 const struct iwx_tlv_calib_data *def_calib = data;
1293 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1294
1295 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1296 return EINVAL;
1297
1298 sc->sc_default_calib[ucode_type].flow_trigger =
1299 def_calib->calib.flow_trigger;
1300 sc->sc_default_calib[ucode_type].event_trigger =
1301 def_calib->calib.event_trigger;
1302
1303 return 0;
1304 }
1305
1306 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1307 iwx_fw_info_free(struct iwx_fw_info *fw)
1308 {
1309 free(fw->fw_rawdata, M_DEVBUF);
1310 fw->fw_rawdata = NULL;
1311 fw->fw_rawsize = 0;
1312 /* don't touch fw->fw_status */
1313 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1314 free(fw->iml, M_DEVBUF);
1315 fw->iml = NULL;
1316 fw->iml_len = 0;
1317 }
1318
1319 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1320
/*
 * Load the firmware image via firmware(9) and parse its TLV records
 * into sc->sc_fw: code sections, capability/API bitmaps, debug
 * destination info, command versions, and the image loader (IML).
 * Returns 0 on success.  On failure the firmware status is reset and
 * any partially parsed state is freed.
 */
static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	/* Parse only once; later calls are no-ops. */
	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);


	/* Reset all capability state before (re-)parsing. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	/* A TLV-format image begins with a zero word and the TLV magic. */
	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	/* Header version; may be overridden by a FW_VERSION TLV below. */
	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	/* Walk the TLVs; each payload is padded to a 4-byte boundary. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			/* Presence-only TLV: payload must be empty. */
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug? Observations from
			 * current firmware file:
			 * 1) TLV_PAN is parsed first
			 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Only validated; the value itself is unused. */
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		/* Code sections for each ucode image type. */
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Each TLV covers a 32-bit window of the API bitmap. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			/*
			 * NOTE(review): unlike API_CHANGES_SET above, err is
			 * not set here, so an out-of-range index silently
			 * ends parsing with success -- confirm intent.
			 */
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			/* Validated for size only; paging is set up later. */
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			/* Overrides the version derived from the header. */
			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			/* Only version 0 of the debug dest TLV is supported. */
			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			/* First debug dest TLV wins; ignore duplicates. */
			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (const void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg);
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;

			/* Needs a debug dest first; keep only the first per id. */
			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "Found debug configuration: %d\n", conf->id);
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			const struct iwx_umac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			/* Address is masked to strip cache-control bits. */
			sc->sc_uc.uc_umac_error_event_table =
			    le32toh(dbg_ptrs->error_info_addr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			const struct iwx_lmac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
			    le32toh(dbg_ptrs->error_event_table_ptr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			/* Replace any IML from an earlier TLV. */
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF);
				fw->iml_len = 0;
			}
			/* M_WAITOK cannot fail; NULL check is defensive. */
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAITOK | M_ZERO);
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			/*
			 * NOTE(review): a payload that is not a multiple of
			 * the entry size is silently truncated to whole
			 * entries rather than rejected -- confirm intent.
			 */
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
		case IWX_UCODE_TLV_FW_NUM_BEACONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
		case IWX_UCODE_TLV_CURRENT_PC:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
		case 0x101:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
		case 0x100000c:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
		case 69:
			break;

		default:
			/* Unknown TLV types are treated as fatal. */
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0, ("unhandled fw parse error"));

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	return err;
}
1761
1762 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1763 iwx_prph_addr_mask(struct iwx_softc *sc)
1764 {
1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1766 return 0x00ffffff;
1767 else
1768 return 0x000fffff;
1769 }
1770
/*
 * Read a periphery (PRPH) register via the indirect HBUS access window.
 * Caller is responsible for keeping the NIC awake (see iwx_read_prph()).
 */
static uint32_t
iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	/*
	 * Latch the masked target address; the (3 << 24) bits select the
	 * access mode (matches Linux iwlwifi's PRPH read sequence).
	 * The barrier orders the address write before the data read.
	 */
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}
1779
1780 uint32_t
iwx_read_prph(struct iwx_softc * sc,uint32_t addr)1781 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1782 {
1783 iwx_nic_assert_locked(sc);
1784 return iwx_read_prph_unlocked(sc, addr);
1785 }
1786
/*
 * Write a periphery (PRPH) register via the indirect HBUS access window.
 * Caller is responsible for keeping the NIC awake (see iwx_write_prph()).
 */
static void
iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	/*
	 * Latch the masked target address first; the barrier orders the
	 * address write before the data write.
	 */
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}
1795
1796 static void
iwx_write_prph(struct iwx_softc * sc,uint32_t addr,uint32_t val)1797 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1798 {
1799 iwx_nic_assert_locked(sc);
1800 iwx_write_prph_unlocked(sc, addr, val);
1801 }
1802
1803 static uint32_t
iwx_read_umac_prph(struct iwx_softc * sc,uint32_t addr)1804 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1805 {
1806 return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1807 }
1808
1809 static void
iwx_write_umac_prph(struct iwx_softc * sc,uint32_t addr,uint32_t val)1810 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1811 {
1812 iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1813 }
1814
1815 static int
iwx_read_mem(struct iwx_softc * sc,uint32_t addr,void * buf,int dwords)1816 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1817 {
1818 int offs, err = 0;
1819 uint32_t *vals = buf;
1820
1821 if (iwx_nic_lock(sc)) {
1822 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1823 for (offs = 0; offs < dwords; offs++)
1824 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1825 iwx_nic_unlock(sc);
1826 } else {
1827 err = EBUSY;
1828 }
1829 return err;
1830 }
1831
1832 static int
iwx_poll_bit(struct iwx_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)1833 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1834 int timo)
1835 {
1836 for (;;) {
1837 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1838 return 1;
1839 }
1840 if (timo < 10) {
1841 return 0;
1842 }
1843 timo -= 10;
1844 DELAY(10);
1845 }
1846 }
1847
/*
 * Request that the device stay awake so periphery registers and SRAM
 * can be accessed. The lock is a recursive counter; each successful
 * call must be balanced by iwx_nic_unlock().
 * Returns 1 on success, 0 if the device failed to wake up.
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	/* Recursive acquisition: just bump the reference count. */
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Ask the device's MAC to stay awake for register access. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	/*
	 * Wait (up to ~150ms) for the MAC clock to become ready and
	 * for the device to leave its sleep state.
	 */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1873
1874 static void
iwx_nic_assert_locked(struct iwx_softc * sc)1875 iwx_nic_assert_locked(struct iwx_softc *sc)
1876 {
1877 if (sc->sc_nic_locks <= 0)
1878 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1879 }
1880
1881 static void
iwx_nic_unlock(struct iwx_softc * sc)1882 iwx_nic_unlock(struct iwx_softc *sc)
1883 {
1884 if (sc->sc_nic_locks > 0) {
1885 if (--sc->sc_nic_locks == 0)
1886 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1887 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1888 } else
1889 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1890 }
1891
1892 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1894 uint32_t mask)
1895 {
1896 uint32_t val;
1897
1898 if (iwx_nic_lock(sc)) {
1899 val = iwx_read_prph(sc, reg) & mask;
1900 val |= bits;
1901 iwx_write_prph(sc, reg, val);
1902 iwx_nic_unlock(sc);
1903 return 0;
1904 }
1905 return EBUSY;
1906 }
1907
/* Set `bits' in periphery register `reg', preserving all other bits. */
static int
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}
1913
/* Clear `bits' in periphery register `reg', preserving all other bits. */
static int
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1919
1920 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1922 {
1923 if (error != 0)
1924 return;
1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1926 *(bus_addr_t *)arg = segs[0].ds_addr;
1927 }
1928
/*
 * Allocate a physically contiguous, coherent DMA buffer of `size' bytes
 * with the given alignment, suitable for sharing with the device.
 * Fills in dma->{tag,map,vaddr,paddr,size}. On failure all partially
 * acquired resources are released and dma is left in a state safe to
 * pass to iwx_dma_contig_free(). Returns 0 or a bus_dma error code.
 */
static int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	/* Start from a clean state so the fail path can free safely. */
	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single segment, 32-bit addressable (device DMA limitation). */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* Resolve the buffer's bus address into dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwx_dma_contig_free(dma);
	return error;
}
1967
/*
 * Release a DMA buffer allocated with iwx_dma_contig_alloc().
 * Safe to call on a partially initialized or already-freed iwx_dma_info.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		/* Complete any outstanding device access before unmapping. */
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1983
1984 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1985 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986 {
1987 bus_size_t size;
1988 int i, err;
1989
1990 ring->cur = 0;
1991
1992 /* Allocate RX descriptors (256-byte aligned). */
1993 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1994 size = sizeof(struct iwx_rx_transfer_desc);
1995 else
1996 size = sizeof(uint64_t);
1997 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1998 size * IWX_RX_MQ_RING_COUNT, 256);
1999 if (err) {
2000 device_printf(sc->sc_dev,
2001 "could not allocate RX ring DMA memory\n");
2002 goto fail;
2003 }
2004 ring->desc = ring->free_desc_dma.vaddr;
2005
2006 /* Allocate RX status area (16-byte aligned). */
2007 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2008 size = sizeof(uint16_t);
2009 else
2010 size = sizeof(*ring->stat);
2011 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2012 if (err) {
2013 device_printf(sc->sc_dev,
2014 "could not allocate RX status DMA memory\n");
2015 goto fail;
2016 }
2017 ring->stat = ring->stat_dma.vaddr;
2018
2019 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2020 size = sizeof(struct iwx_rx_completion_desc);
2021 else
2022 size = sizeof(uint32_t);
2023 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2024 size * IWX_RX_MQ_RING_COUNT, 256);
2025 if (err) {
2026 device_printf(sc->sc_dev,
2027 "could not allocate RX ring DMA memory\n");
2028 goto fail;
2029 }
2030
2031 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2032 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2033 0, NULL, NULL, &ring->data_dmat);
2034
2035 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2036 struct iwx_rx_data *data = &ring->data[i];
2037
2038 memset(data, 0, sizeof(*data));
2039 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2040 if (err) {
2041 device_printf(sc->sc_dev,
2042 "could not create RX buf DMA map\n");
2043 goto fail;
2044 }
2045
2046 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2047 if (err)
2048 goto fail;
2049 }
2050 return 0;
2051
2052 fail: iwx_free_rx_ring(sc, ring);
2053 return err;
2054 }
2055
/*
 * Disable the RX DMA engine (RFH) and wait for it to become idle.
 * AX210+ devices use the GEN3 register variants at the UMAC offset.
 * Best-effort: silently does nothing if the NIC lock cannot be taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		/* Clear the RFH DMA configuration to stop the engine. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* Poll for idle for up to ~10ms (1000 * 10us). */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
2081
2082 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2083 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2084 {
2085 ring->cur = 0;
2086 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2087 BUS_DMASYNC_PREWRITE);
2088 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2089 uint16_t *status = sc->rxq.stat_dma.vaddr;
2090 *status = 0;
2091 } else
2092 memset(ring->stat, 0, sizeof(*ring->stat));
2093 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2094 BUS_DMASYNC_POSTWRITE);
2095
2096 }
2097
2098 static void
iwx_free_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2099 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2100 {
2101 int i;
2102
2103 iwx_dma_contig_free(&ring->free_desc_dma);
2104 iwx_dma_contig_free(&ring->stat_dma);
2105 iwx_dma_contig_free(&ring->used_desc_dma);
2106
2107 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2108 struct iwx_rx_data *data = &ring->data[i];
2109 if (data->m != NULL) {
2110 bus_dmamap_sync(ring->data_dmat, data->map,
2111 BUS_DMASYNC_POSTREAD);
2112 bus_dmamap_unload(ring->data_dmat, data->map);
2113 m_freem(data->m);
2114 data->m = NULL;
2115 }
2116 if (data->map != NULL) {
2117 bus_dmamap_destroy(ring->data_dmat, data->map);
2118 data->map = NULL;
2119 }
2120 }
2121 if (ring->data_dmat != NULL) {
2122 bus_dma_tag_destroy(ring->data_dmat);
2123 ring->data_dmat = NULL;
2124 }
2125 }
2126
2127 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2128 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2129 {
2130 bus_addr_t paddr;
2131 bus_size_t size;
2132 int i, err;
2133 size_t bc_tbl_size;
2134 bus_size_t bc_align;
2135 size_t mapsize;
2136
2137 ring->qid = qid;
2138 ring->queued = 0;
2139 ring->cur = 0;
2140 ring->cur_hw = 0;
2141 ring->tail = 0;
2142 ring->tail_hw = 0;
2143
2144 /* Allocate TX descriptors (256-byte aligned). */
2145 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2146 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2147 if (err) {
2148 device_printf(sc->sc_dev,
2149 "could not allocate TX ring DMA memory\n");
2150 goto fail;
2151 }
2152 ring->desc = ring->desc_dma.vaddr;
2153
2154 /*
2155 * The hardware supports up to 512 Tx rings which is more
2156 * than we currently need.
2157 *
2158 * In DQA mode we use 1 command queue + 1 default queue for
2159 * management, control, and non-QoS data frames.
2160 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2161 *
2162 * Tx aggregation requires additional queues, one queue per TID for
2163 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2164 * Firmware may assign its own internal IDs for these queues
2165 * depending on which TID gets aggregation enabled first.
2166 * The driver maintains a table mapping driver-side queue IDs
2167 * to firmware-side queue IDs.
2168 */
2169
2170 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2171 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2172 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2173 bc_align = 128;
2174 } else {
2175 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2176 bc_align = 64;
2177 }
2178 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2179 bc_align);
2180 if (err) {
2181 device_printf(sc->sc_dev,
2182 "could not allocate byte count table DMA memory\n");
2183 goto fail;
2184 }
2185
2186 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2187 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2188 IWX_FIRST_TB_SIZE_ALIGN);
2189 if (err) {
2190 device_printf(sc->sc_dev,
2191 "could not allocate cmd DMA memory\n");
2192 goto fail;
2193 }
2194 ring->cmd = ring->cmd_dma.vaddr;
2195
2196 /* FW commands may require more mapped space than packets. */
2197 if (qid == IWX_DQA_CMD_QUEUE)
2198 mapsize = (sizeof(struct iwx_cmd_header) +
2199 IWX_MAX_CMD_PAYLOAD_SIZE);
2200 else
2201 mapsize = MCLBYTES;
2202 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2203 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2204 mapsize, 0, NULL, NULL, &ring->data_dmat);
2205
2206 paddr = ring->cmd_dma.paddr;
2207 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2208 struct iwx_tx_data *data = &ring->data[i];
2209
2210 data->cmd_paddr = paddr;
2211 paddr += sizeof(struct iwx_device_cmd);
2212
2213 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2214 if (err) {
2215 device_printf(sc->sc_dev,
2216 "could not create TX buf DMA map\n");
2217 goto fail;
2218 }
2219 }
2220 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2221 return 0;
2222
2223 fail:
2224 return err;
2225 }
2226
2227 static void
iwx_reset_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2228 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2229 {
2230 int i;
2231
2232 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2233 struct iwx_tx_data *data = &ring->data[i];
2234
2235 if (data->m != NULL) {
2236 bus_dmamap_sync(ring->data_dmat, data->map,
2237 BUS_DMASYNC_POSTWRITE);
2238 bus_dmamap_unload(ring->data_dmat, data->map);
2239 m_freem(data->m);
2240 data->m = NULL;
2241 }
2242 }
2243
2244 /* Clear byte count table. */
2245 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2246
2247 /* Clear TX descriptors. */
2248 memset(ring->desc, 0, ring->desc_dma.size);
2249 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2250 BUS_DMASYNC_PREWRITE);
2251 sc->qfullmsk &= ~(1 << ring->qid);
2252 sc->qenablemsk &= ~(1 << ring->qid);
2253 for (i = 0; i < nitems(sc->aggqid); i++) {
2254 if (sc->aggqid[i] == ring->qid) {
2255 sc->aggqid[i] = 0;
2256 break;
2257 }
2258 }
2259 ring->queued = 0;
2260 ring->cur = 0;
2261 ring->cur_hw = 0;
2262 ring->tail = 0;
2263 ring->tail_hw = 0;
2264 ring->tid = 0;
2265 }
2266
2267 static void
iwx_free_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2268 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2269 {
2270 int i;
2271
2272 iwx_dma_contig_free(&ring->desc_dma);
2273 iwx_dma_contig_free(&ring->cmd_dma);
2274 iwx_dma_contig_free(&ring->bc_tbl);
2275
2276 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2277 struct iwx_tx_data *data = &ring->data[i];
2278
2279 if (data->m != NULL) {
2280 bus_dmamap_sync(ring->data_dmat, data->map,
2281 BUS_DMASYNC_POSTWRITE);
2282 bus_dmamap_unload(ring->data_dmat, data->map);
2283 m_freem(data->m);
2284 data->m = NULL;
2285 }
2286 if (data->map != NULL) {
2287 bus_dmamap_destroy(ring->data_dmat, data->map);
2288 data->map = NULL;
2289 }
2290 }
2291 if (ring->data_dmat != NULL) {
2292 bus_dma_tag_destroy(ring->data_dmat);
2293 ring->data_dmat = NULL;
2294 }
2295 }
2296
/*
 * Mask all interrupts except the RF-kill cause, so a switch toggle can
 * still be observed while the device is otherwise quiesced.
 * MSI-X mask registers are active-low: a set bit masks the cause.
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* Mask all FH causes; leave only RF-kill in HW causes. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	/* Allow RF-kill to wake the PCIe link out of L1. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2314
2315 static int
iwx_check_rfkill(struct iwx_softc * sc)2316 iwx_check_rfkill(struct iwx_softc *sc)
2317 {
2318 uint32_t v;
2319 int rv;
2320
2321 /*
2322 * "documentation" is not really helpful here:
2323 * 27: HW_RF_KILL_SW
2324 * Indicates state of (platform's) hardware RF-Kill switch
2325 *
2326 * But apparently when it's off, it's on ...
2327 */
2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2330 if (rv) {
2331 sc->sc_flags |= IWX_FLAG_RFKILL;
2332 } else {
2333 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2334 }
2335
2336 return rv;
2337 }
2338
/*
 * Unmask the full set of interrupt causes used during normal operation.
 */
static void
iwx_enable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
2358
/*
 * Unmask only the interrupts needed to observe firmware load
 * completion (ALIVE notification plus FH RX activity).
 */
static void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* Unmask only the ALIVE cause (mask bits are active-low). */
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
2378
#if 0
/* Currently unused: re-apply the last saved MSI interrupt mask. */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2386
/*
 * Mask every interrupt cause; in MSI mode also acknowledge anything
 * still pending so no stale interrupt fires after re-enable.
 */
static void
iwx_disable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWX_WRITE(sc, IWX_CSR_INT, ~0);
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
	} else {
		/* Setting the init-mask values masks all causes. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}
}
2403
/*
 * Reset and (re)enable the Interrupt Cause Table: clear the table,
 * rewind the read index, program its physical address into the device
 * and switch the driver to ICT mode. Interrupts are disabled for the
 * duration of the reprogramming.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Ack any pending causes before unmasking. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2425
#define IWX_HW_READY_TIMEOUT 50
/*
 * Signal NIC_READY and poll (up to IWX_HW_READY_TIMEOUT microseconds)
 * for the device to confirm it. On success also report "OS alive" to
 * the device. Returns nonzero when the hardware is ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2447
/*
 * Bring the hardware to the "ready" state, retrying with the PREPARE
 * handshake if the fast path fails. Returns 0 on success or ETIMEDOUT.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	/* Fast path: the device may already be ready. */
	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		/* Poll readiness for up to 150ms per attempt. */
		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2477
/*
 * Work around a 22000-family power-gating issue: force the core active,
 * enable power/sleep gating, then release the force-active bit.
 * The intermediate delays are required by the hardware sequence.
 * Returns 0 or an error from the PRPH accessors.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2498
2499 static void
iwx_apm_config(struct iwx_softc * sc)2500 iwx_apm_config(struct iwx_softc *sc)
2501 {
2502 uint16_t lctl, cap;
2503 int pcie_ptr;
2504 int error;
2505
2506 /*
2507 * L0S states have been found to be unstable with our devices
2508 * and in newer hardware they are not officially supported at
2509 * all, so we must always set the L0S_DISABLED bit.
2510 */
2511 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2512
2513 error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
2514 if (error != 0) {
2515 printf("can't fill pcie_ptr\n");
2516 return;
2517 }
2518
2519 lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
2520 sizeof(lctl));
2521 #define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
2522 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2523 #define PCI_PCIE_DCSR2 0x28
2524 cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
2525 sizeof(lctl));
2526 #define PCI_PCIE_DCSR2_LTREN 0x00000400
2527 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2528 #define PCI_PCIE_LCSR_ASPM_L1 0x00000002
2529 DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2530 DEVNAME(sc),
2531 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2532 sc->sc_ltr_enabled ? "En" : "Dis"));
2533 #undef PCI_PCIE_LCSR_ASPM_L0S
2534 #undef PCI_PCIE_DCSR2
2535 #undef PCI_PCIE_DCSR2_LTREN
2536 #undef PCI_PCIE_LCSR_ASPM_L1
2537 }
2538
2539 /*
2540 * Start up NIC's basic functionality after it has been reset
2541 * e.g. after platform boot or shutdown.
2542 * NOTE: This does not load uCode nor start the embedded processor
2543 */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2592
/*
 * Power the adapter down: arm PME, stop bus-master DMA, and drop the
 * "init done" bit to move the device to its uninitialized (D0U) state.
 * The write/delay ordering follows the hardware's shutdown sequence.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for bus master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2621
/*
 * Program the MSI-X hardware configuration and capture the resulting
 * "init" cause masks so later enable/disable paths can restore them.
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/*
	 * Snapshot the causes left unmasked by iwx_conf_msix_hw().
	 * The mask registers are active-low (a set bit masks a cause),
	 * hence the inversion.
	 */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2635
/*
 * Configure the device's MSI-X (or MSI fallback) interrupt routing.
 * All RX queues and all non-RX causes are mapped onto a single vector
 * (vector 0), then the corresponding causes are unmasked.
 * `stopped' is nonzero when the device is halted, in which case the
 * MSI/MSIX mode register cannot be written.
 */
static void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	/* Tell the device to use MSI-X delivery. */
	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
		    IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2723
/*
 * Clear the persistence bit left set by some BIOSes on 22000-family
 * devices; while set, firmware load fails. Returns EPERM when the
 * register is write-protected and the bit cannot be cleared.
 */
static int
iwx_clear_persistence_bit(struct iwx_softc *sc)
{
	uint32_t hpm, wprot;

	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
	/* 0xa5a5a5a0 reads as a bogus/unreadable register value. */
	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
		if (wprot & IWX_PREG_WFPM_ACCESS) {
			printf("%s: cannot clear persistence bit\n",
			    DEVNAME(sc));
			return EPERM;
		}
		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
		    hpm & ~IWX_PERSISTENCE_BIT);
	}

	return 0;
}
2743
/*
 * Full hardware bring-up: prepare the card, apply 22000-family
 * workarounds, reset the device, initialize APM and MSI-X, and leave
 * only the RF-kill interrupt enabled. Returns 0 or an errno.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Integrated 22000-family parts need the power-gating
	 * workaround, which requires a stable MAC clock first and a
	 * second reset afterwards.
	 */
	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	/* Only RF-kill is of interest until firmware is loaded. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2796
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	/* Quiesce DMA and drop all pending RX/TX buffers. */
	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging and PNVM DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2857
2858 static void
iwx_nic_config(struct iwx_softc * sc)2859 iwx_nic_config(struct iwx_softc *sc)
2860 {
2861 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2862 uint32_t mask, val, reg_val = 0;
2863
2864 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2865 IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2866 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2867 IWX_FW_PHY_CFG_RADIO_STEP_POS;
2868 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2869 IWX_FW_PHY_CFG_RADIO_DASH_POS;
2870
2871 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2872 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2873 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2874 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2875
2876 /* radio configuration */
2877 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2878 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2879 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2880
2881 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2882 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2883 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2884 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2885 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2886 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2887 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2888
2889 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2890 val &= ~mask;
2891 val |= reg_val;
2892 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2893 }
2894
static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	/* Set the default interrupt coalescing timer. */
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2906
static int
iwx_nic_init(struct iwx_softc *sc)
{
	int err;

	/*
	 * NOTE(review): the iwx_apm_init() return value is deliberately(?)
	 * ignored here, unlike in iwx_start_hw() which checks it — confirm
	 * this is intentional.
	 */
	iwx_apm_init(sc);
	/* Pre-AX210 devices need explicit MAC/PHY configuration. */
	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
		iwx_nic_config(sc);

	err = iwx_nic_rx_init(sc);
	if (err)
		return err;

	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
2924
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
	/* Indexed by EDCA access category: BE, BK, VI, VO. */
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2932
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	iwx_reset_tx_ring(sc, ring);

	/*
	 * Build the queue configuration command in whichever layout the
	 * running firmware understands: the legacy SCD_QUEUE_CFG layout
	 * (version 0/unknown) or SCD_QUEUE_CONFIG_CMD v3.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* Validate the firmware's response before trusting its fields. */
	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our ring state. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	/* Success: record the queue as enabled and remember its TID. */
	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3025
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	/*
	 * Mirror of iwx_enable_txq(): pick the command layout matching
	 * the firmware's SCD_QUEUE_CONFIG_CMD version.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Mark the queue disabled and drop any frames still queued. */
	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3084
3085 static void
iwx_post_alive(struct iwx_softc * sc)3086 iwx_post_alive(struct iwx_softc *sc)
3087 {
3088 int txcmd_ver;
3089
3090 iwx_ict_reset(sc);
3091
3092 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3093 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3094 sc->sc_rate_n_flags_version = 2;
3095 else
3096 sc->sc_rate_n_flags_version = 1;
3097
3098 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3099 }
3100
3101 static int
iwx_schedule_session_protection(struct iwx_softc * sc,struct iwx_node * in,uint32_t duration_tu)3102 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
3103 uint32_t duration_tu)
3104 {
3105
3106 struct iwx_session_prot_cmd cmd = {
3107 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3108 in->in_color)),
3109 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
3110 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3111 .duration_tu = htole32(duration_tu),
3112 };
3113 uint32_t cmd_id;
3114 int err;
3115
3116 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3117 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3118 if (!err)
3119 sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3120 return err;
3121 }
3122
3123 static void
iwx_unprotect_session(struct iwx_softc * sc,struct iwx_node * in)3124 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3125 {
3126 struct iwx_session_prot_cmd cmd = {
3127 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3128 in->in_color)),
3129 .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3130 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3131 .duration_tu = 0,
3132 };
3133 uint32_t cmd_id;
3134
3135 /* Do nothing if the time event has already ended. */
3136 if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3137 return;
3138
3139 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3140 if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3141 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3142 }
3143
3144 /*
3145 * NVM read access and content parsing. We do not support
3146 * external NVM or writing NVM.
3147 */
3148
3149 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3150 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3151 {
3152 uint8_t tx_ant;
3153
3154 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3155 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3156
3157 if (sc->sc_nvm.valid_tx_ant)
3158 tx_ant &= sc->sc_nvm.valid_tx_ant;
3159
3160 return tx_ant;
3161 }
3162
3163 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3164 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3165 {
3166 uint8_t rx_ant;
3167
3168 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3169 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3170
3171 if (sc->sc_nvm.valid_rx_ant)
3172 rx_ant &= sc->sc_nvm.valid_rx_ant;
3173
3174 return rx_ant;
3175 }
3176
static void
iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];
	const uint8_t *nvm_channels;
	uint32_t ch_flags;
	int ch_idx, nchan;

	/* Pick the channel table: UHB-capable devices have a larger one. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	/* 2.4Ghz; 1-13: 11b/g channels. */
	if (!data->sku_cap_band_24GHz_enable)
		goto band_5;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	for (ch_idx = 0;
	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
	    ch_idx++) {

		uint32_t nflags = 0;
		int cflags = 0;

		/*
		 * The regulatory channel profile layout depends on the
		 * NVM_GET_INFO response version (v4 uses 32-bit flags,
		 * v3 uses 16-bit).
		 */
		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		} else {
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
		}
		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}

band_5:
	/* 5Ghz */
	if (!data->sku_cap_band_52GHz_enable)
		goto band_6;


	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11A);
	setbit(bands, IEEE80211_MODE_11NA);
	setbit(bands, IEEE80211_MODE_VHT_5GHZ);

	/* 5 GHz channels follow the 2.4 GHz ones in the NVM table. */
	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
	    ch_idx++) {
		uint32_t nflags = 0;
		int cflags = 0;

		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		else
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);

		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;
		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT80;
		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT160;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}
band_6:
	/* 6GHz one day ... */
	return;
}
3279
3280 static int
iwx_mimo_enabled(struct iwx_softc * sc)3281 iwx_mimo_enabled(struct iwx_softc *sc)
3282 {
3283
3284 return !sc->sc_nvm.sku_cap_mimo_disable;
3285 }
3286
3287 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3289 uint16_t ssn, uint16_t buf_size)
3290 {
3291 reorder_buf->head_sn = ssn;
3292 reorder_buf->num_stored = 0;
3293 reorder_buf->buf_size = buf_size;
3294 reorder_buf->last_amsdu = 0;
3295 reorder_buf->last_sub_index = 0;
3296 reorder_buf->removed = 0;
3297 reorder_buf->valid = 0;
3298 reorder_buf->consec_oldsn_drops = 0;
3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3300 reorder_buf->consec_oldsn_prev_drop = 0;
3301 }
3302
3303 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3305 {
3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3307
3308 reorder_buf->removed = 1;
3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3310 }
3311
3312 #define IWX_MAX_RX_BA_SESSIONS 16
3313
3314 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3316 {
3317 int i;
3318
3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3320 if (sc->sc_rxba_data[i].baid ==
3321 IWX_RX_REORDER_DATA_INVALID_BAID)
3322 continue;
3323 if (sc->sc_rxba_data[i].tid == tid)
3324 return &sc->sc_rxba_data[i];
3325 }
3326
3327 return NULL;
3328 }
3329
static int
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		/* Allocate a new BAID for this TID/window in firmware. */
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		/* Look up the existing session so we can report its BAID. */
		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* Command v1 removes by BAID; later versions by sta/tid. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	/* Firmware returns the allocated BAID via the status word. */
	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* The BAID indexes sc_rxba_data; reject out-of-range IDs. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3380
static void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	/* Enforce the firmware limit on concurrent RX BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		/* Only BAID-ML capable firmware is supported by this port. */
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		/*
		 * NOTE(review): a non-zero timeout_val returns here and
		 * skips the sc_rx_ba_sessions++ below — confirm this is
		 * intentional and not a missed session-count update.
		 */
		if (timeout_val != 0) {
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	/* Keep the active-session count in sync. */
	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3431
static void
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid)
{
	int err, qid;

	qid = sc->aggqid[tid];
	if (qid == 0) {
		/* Firmware should pick the next unused Tx queue. */
		/* fls() is 1-based, so this is one past the highest
		 * currently enabled queue. */
		qid = fls(sc->qenablemsk);
	}

	DPRINTF(("%s: qid=%i\n", __func__, qid));

	/*
	 * Simply enable the queue.
	 * Firmware handles Tx Ba session setup and teardown.
	 */
	if ((sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwx_nic_lock(sc)) {
			return;
		}
		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
		    IWX_TX_RING_COUNT);
		iwx_nic_unlock(sc);
		if (err) {
			printf("%s: could not enable Tx queue %d "
			    "(error %d)\n", DEVNAME(sc), qid, err);
			return;
		}
	}
	/* Record the aggregation queue for this TID. */
	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
	sc->aggqid[tid] = qid;
}
3467
static void
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	/* Process deferred RX block-ack start/stop requests per TID. */
	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			/*
			 * NOTE(review): this break exits the whole TID
			 * loop (and leaves start_tidmask set) when one
			 * TID is already done — confirm `continue` was
			 * not intended here.
			 */
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3504
3505 static void
iwx_ba_tx_task(void * arg,int npending __unused)3506 iwx_ba_tx_task(void *arg, int npending __unused)
3507 {
3508 struct iwx_softc *sc = arg;
3509 struct ieee80211com *ic = &sc->sc_ic;
3510 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3511 struct ieee80211_node *ni = vap->iv_bss;
3512 int tid;
3513
3514 IWX_LOCK(sc);
3515 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3516 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3517 break;
3518 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3519 DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
3520 tid));
3521 iwx_sta_tx_agg_start(sc, ni, tid);
3522 sc->ba_tx.start_tidmask &= ~(1 << tid);
3523 sc->sc_flags |= IWX_FLAG_AMPDUTX;
3524 }
3525 }
3526
3527 IWX_UNLOCK(sc);
3528 }
3529
3530 static void
iwx_set_mac_addr_from_csr(struct iwx_softc * sc,struct iwx_nvm_data * data)3531 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3532 {
3533 uint32_t mac_addr0, mac_addr1;
3534
3535 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3536
3537 if (!iwx_nic_lock(sc))
3538 return;
3539
3540 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3541 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3542
3543 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3544
3545 /* If OEM fused a valid address, use it instead of the one in OTP. */
3546 if (iwx_is_valid_mac_addr(data->hw_addr)) {
3547 iwx_nic_unlock(sc);
3548 return;
3549 }
3550
3551 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3552 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3553
3554 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3555
3556 iwx_nic_unlock(sc);
3557 }
3558
3559 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3560 iwx_is_valid_mac_addr(const uint8_t *addr)
3561 {
3562 static const uint8_t reserved_mac[] = {
3563 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3564 };
3565
3566 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3567 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3568 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3569 !ETHER_IS_MULTICAST(addr));
3570 }
3571
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	const uint8_t *bytes;
	int i;

	/*
	 * Reverse the in-memory byte order of each CSR word to build
	 * the MAC address. The first word contributes four bytes, the
	 * second word two.
	 */
	bytes = (const uint8_t *)&mac_addr0;
	for (i = 0; i < 4; i++)
		dest[i] = bytes[3 - i];

	bytes = (const uint8_t *)&mac_addr1;
	dest[4] = bytes[1];
	dest[5] = bytes[0];
}
3587
static int
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err = 0;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory. So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err) {
		printf("%s: failed to send cmd (error %d)", __func__, err);
		return err;
	}

	/* The response must match the expected size exactly. */
	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
		printf("%s: resp_len=%zu\n", __func__, resp_len);
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* The MAC address comes from CSR registers, not from this cmd. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* Location-aware regulatory needs both NVM and firmware support. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* Keep a copy of the raw response for the channel map code. */
	memcpy(&sc->sc_rsp_info, rsp, resp_len);
	if (v4) {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
	} else {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3677
static int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	/*
	 * Hand the firmware sections to the device via the context-info
	 * mechanism matching this device generation.
	 */
	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load (up to 1 second; woken via sc_uc) */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The IML and firmware image buffers are only needed for upload. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3714
static int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Acknowledge any pending interrupts before starting. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	/* Kick off the actual firmware upload. */
	return iwx_load_firmware(sc);
}
3742
3743 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3744 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3745 size_t len)
3746 {
3747 const struct iwx_ucode_tlv *tlv;
3748 uint32_t sha1 = 0;
3749 uint16_t mac_type = 0, rf_id = 0;
3750 uint8_t *pnvm_data = NULL, *tmp;
3751 int hw_match = 0;
3752 uint32_t size = 0;
3753 int err;
3754
3755 while (len >= sizeof(*tlv)) {
3756 uint32_t tlv_len, tlv_type;
3757
3758 len -= sizeof(*tlv);
3759 tlv = (const void *)data;
3760
3761 tlv_len = le32toh(tlv->length);
3762 tlv_type = le32toh(tlv->type);
3763
3764 if (len < tlv_len) {
3765 printf("%s: invalid TLV len: %zd/%u\n",
3766 DEVNAME(sc), len, tlv_len);
3767 err = EINVAL;
3768 goto out;
3769 }
3770
3771 data += sizeof(*tlv);
3772
3773 switch (tlv_type) {
3774 case IWX_UCODE_TLV_PNVM_VERSION:
3775 if (tlv_len < sizeof(uint32_t))
3776 break;
3777
3778 sha1 = le32_to_cpup((const uint32_t *)data);
3779 break;
3780 case IWX_UCODE_TLV_HW_TYPE:
3781 if (tlv_len < 2 * sizeof(uint16_t))
3782 break;
3783
3784 if (hw_match)
3785 break;
3786
3787 mac_type = le16_to_cpup((const uint16_t *)data);
3788 rf_id = le16_to_cpup((const uint16_t *)(data +
3789 sizeof(uint16_t)));
3790
3791 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3792 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3793 hw_match = 1;
3794 break;
3795 case IWX_UCODE_TLV_SEC_RT: {
3796 const struct iwx_pnvm_section *section;
3797 uint32_t data_len;
3798
3799 section = (const void *)data;
3800 data_len = tlv_len - sizeof(*section);
3801
3802 /* TODO: remove, this is a deprecated separator */
3803 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3804 break;
3805
3806 tmp = malloc(size + data_len, M_DEVBUF,
3807 M_WAITOK | M_ZERO);
3808 if (tmp == NULL) {
3809 err = ENOMEM;
3810 goto out;
3811 }
3812 // XXX:misha pnvm_data is NULL and size is 0 at first pass
3813 memcpy(tmp, pnvm_data, size);
3814 memcpy(tmp + size, section->data, data_len);
3815 free(pnvm_data, M_DEVBUF);
3816 pnvm_data = tmp;
3817 size += data_len;
3818 break;
3819 }
3820 case IWX_UCODE_TLV_PNVM_SKU:
3821 /* New PNVM section started, stop parsing. */
3822 goto done;
3823 default:
3824 break;
3825 }
3826
3827 if (roundup(tlv_len, 4) > len)
3828 break;
3829 len -= roundup(tlv_len, 4);
3830 data += roundup(tlv_len, 4);
3831 }
3832 done:
3833 if (!hw_match || size == 0) {
3834 err = ENOENT;
3835 goto out;
3836 }
3837
3838 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3839 if (err) {
3840 printf("%s: could not allocate DMA memory for PNVM\n",
3841 DEVNAME(sc));
3842 err = ENOMEM;
3843 goto out;
3844 }
3845 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3846 iwx_ctxt_info_gen3_set_pnvm(sc);
3847 sc->sc_pnvm_ver = sha1;
3848 out:
3849 free(pnvm_data, M_DEVBUF);
3850 return err;
3851 }
3852
3853 static int
iwx_pnvm_parse(struct iwx_softc * sc,const uint8_t * data,size_t len)3854 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
3855 {
3856 const struct iwx_ucode_tlv *tlv;
3857
3858 while (len >= sizeof(*tlv)) {
3859 uint32_t tlv_len, tlv_type;
3860
3861 len -= sizeof(*tlv);
3862 tlv = (const void *)data;
3863
3864 tlv_len = le32toh(tlv->length);
3865 tlv_type = le32toh(tlv->type);
3866
3867 if (len < tlv_len || roundup(tlv_len, 4) > len)
3868 return EINVAL;
3869
3870 if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
3871 const struct iwx_sku_id *sku_id =
3872 (const void *)(data + sizeof(*tlv));
3873
3874 data += sizeof(*tlv) + roundup(tlv_len, 4);
3875 len -= roundup(tlv_len, 4);
3876
3877 if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
3878 sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
3879 sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
3880 iwx_pnvm_handle_section(sc, data, len) == 0)
3881 return 0;
3882 } else {
3883 data += sizeof(*tlv) + roundup(tlv_len, 4);
3884 len -= roundup(tlv_len, 4);
3885 }
3886 }
3887
3888 return ENOENT;
3889 }
3890
3891 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3892 static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc * sc)3893 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3894 {
3895 struct iwx_prph_scratch *prph_scratch;
3896 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3897
3898 prph_scratch = sc->prph_scratch_dma.vaddr;
3899 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3900
3901 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3902 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3903
3904 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3905 }
3906
3907 /*
3908 * Load platform-NVM (non-volatile-memory) data from the filesystem.
3909 * This data apparently contains regulatory information and affects device
3910 * channel configuration.
3911 * The SKU of AX210 devices tells us which PNVM file section is needed.
3912 * Pre-AX210 devices store NVM data onboard.
3913 */
3914 static int
iwx_load_pnvm(struct iwx_softc * sc)3915 iwx_load_pnvm(struct iwx_softc *sc)
3916 {
3917 const int wait_flags = IWX_PNVM_COMPLETE;
3918 int err = 0;
3919 const struct firmware *pnvm;
3920
3921 if (sc->sc_sku_id[0] == 0 &&
3922 sc->sc_sku_id[1] == 0 &&
3923 sc->sc_sku_id[2] == 0)
3924 return 0;
3925
3926 if (sc->sc_pnvm_name) {
3927 if (sc->pnvm_dma.vaddr == NULL) {
3928 IWX_UNLOCK(sc);
3929 pnvm = firmware_get(sc->sc_pnvm_name);
3930 if (pnvm == NULL) {
3931 printf("%s: could not read %s (error %d)\n",
3932 DEVNAME(sc), sc->sc_pnvm_name, err);
3933 IWX_LOCK(sc);
3934 return EINVAL;
3935 }
3936 sc->sc_pnvm = pnvm;
3937
3938 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3939 IWX_LOCK(sc);
3940 if (err && err != ENOENT) {
3941 return EINVAL;
3942 }
3943 } else
3944 iwx_ctxt_info_gen3_set_pnvm(sc);
3945 }
3946
3947 if (!iwx_nic_lock(sc)) {
3948 return EBUSY;
3949 }
3950
3951 /*
3952 * If we don't have a platform NVM file simply ask firmware
3953 * to proceed without it.
3954 */
3955
3956 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3957 IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3958
3959 /* Wait for the pnvm complete notification from firmware. */
3960 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3961 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3962 if (err)
3963 break;
3964 }
3965
3966 iwx_nic_unlock(sc);
3967
3968 return err;
3969 }
3970
3971 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)3972 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3973 {
3974 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3975 .valid = htole32(valid_tx_ant),
3976 };
3977
3978 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3979 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3980 }
3981
3982 static int
iwx_send_phy_cfg_cmd(struct iwx_softc * sc)3983 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3984 {
3985 struct iwx_phy_cfg_cmd phy_cfg_cmd;
3986
3987 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3988 phy_cfg_cmd.calib_control.event_trigger =
3989 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3990 phy_cfg_cmd.calib_control.flow_trigger =
3991 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3992
3993 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3994 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3995 }
3996
3997 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)3998 iwx_send_dqa_cmd(struct iwx_softc *sc)
3999 {
4000 struct iwx_dqa_enable_cmd dqa_cmd = {
4001 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4002 };
4003 uint32_t cmd_id;
4004
4005 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4006 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4007 }
4008
4009 static int
iwx_load_ucode_wait_alive(struct iwx_softc * sc)4010 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4011 {
4012 int err;
4013
4014 IWX_UNLOCK(sc);
4015 err = iwx_read_firmware(sc);
4016 IWX_LOCK(sc);
4017 if (err)
4018 return err;
4019
4020 err = iwx_start_fw(sc);
4021 if (err)
4022 return err;
4023
4024 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4025 err = iwx_load_pnvm(sc);
4026 if (err)
4027 return err;
4028 }
4029
4030 iwx_post_alive(sc);
4031
4032 return 0;
4033 }
4034
/*
 * Boot the "init" firmware and drive the NVM-access handshake:
 * load the ucode, announce that NVM access commands follow, signal
 * NVM access completion, then sleep until the interrupt path sets
 * IWX_INIT_COMPLETE in sc_init_complete. If 'readnvm' is set, also
 * read the NVM contents and adopt the MAC address stored there.
 *
 * Called with the driver mutex held (msleep() below drops it while
 * sleeping). Returns 0 on success or an errno.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* Refuse to start while the hardware rfkill switch is engaged,
	 * unless we only need to read the NVM. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	/* Cleared here; the RX/interrupt path sets completion bits. */
	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware.
	 * msleep() returns EWOULDBLOCK on the 2-second timeout. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		/* Adopt the MAC address stored in the NVM. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4107
4108 static int
iwx_config_ltr(struct iwx_softc * sc)4109 iwx_config_ltr(struct iwx_softc *sc)
4110 {
4111 struct iwx_ltr_config_cmd cmd = {
4112 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4113 };
4114
4115 if (!sc->sc_ltr_enabled)
4116 return 0;
4117
4118 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4119 }
4120
4121 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4122 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4123 bus_dma_segment_t *seg)
4124 {
4125 struct iwx_rx_data *data = &ring->data[idx];
4126
4127 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4128 struct iwx_rx_transfer_desc *desc = ring->desc;
4129 desc[idx].rbid = htole16(idx & 0xffff);
4130 desc[idx].addr = htole64((*seg).ds_addr);
4131 bus_dmamap_sync(ring->data_dmat, data->map,
4132 BUS_DMASYNC_PREWRITE);
4133 } else {
4134 ((uint64_t *)ring->desc)[idx] =
4135 htole64((*seg).ds_addr);
4136 bus_dmamap_sync(ring->data_dmat, data->map,
4137 BUS_DMASYNC_PREWRITE);
4138 }
4139 }
4140
4141 static int
iwx_rx_addbuf(struct iwx_softc * sc,int size,int idx)4142 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4143 {
4144 struct iwx_rx_ring *ring = &sc->rxq;
4145 struct iwx_rx_data *data = &ring->data[idx];
4146 struct mbuf *m;
4147 int err;
4148 int fatal = 0;
4149 bus_dma_segment_t seg;
4150 int nsegs;
4151
4152 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
4153 if (m == NULL)
4154 return ENOBUFS;
4155
4156 if (data->m != NULL) {
4157 bus_dmamap_unload(ring->data_dmat, data->map);
4158 fatal = 1;
4159 }
4160
4161 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4162 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
4163 &nsegs, BUS_DMA_NOWAIT);
4164 if (err) {
4165 /* XXX */
4166 if (fatal)
4167 panic("could not load RX mbuf");
4168 m_freem(m);
4169 return err;
4170 }
4171 data->m = m;
4172 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
4173
4174 /* Update RX descriptor. */
4175 iwx_update_rx_desc(sc, ring, idx, &seg);
4176 return 0;
4177 }
4178
4179 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4180 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4181 struct iwx_rx_mpdu_desc *desc)
4182 {
4183 int energy_a, energy_b;
4184
4185 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4186 energy_a = desc->v3.energy_a;
4187 energy_b = desc->v3.energy_b;
4188 } else {
4189 energy_a = desc->v1.energy_a;
4190 energy_b = desc->v1.energy_b;
4191 }
4192 energy_a = energy_a ? -energy_a : -256;
4193 energy_b = energy_b ? -energy_b : -256;
4194 return MAX(energy_a, energy_b);
4195 }
4196
4197 static int
iwx_rxmq_get_chains(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4198 iwx_rxmq_get_chains(struct iwx_softc *sc,
4199 struct iwx_rx_mpdu_desc *desc)
4200 {
4201
4202 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4203 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4204 IWX_RATE_MCS_ANT_POS);
4205 else
4206 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4207 IWX_RATE_MCS_ANT_POS);
4208 }
4209
/*
 * Handle an RX PHY notification: cache the PHY information so it can
 * be paired with the MPDU frames that follow it.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	/*
	 * NOTE(review): syncing with PREREAD before the CPU copies data the
	 * device has already written looks suspicious; POSTREAD is the usual
	 * flag for CPU reads after DMA completes — confirm against bus_dma(9).
	 */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4222
4223 /*
4224 * Retrieve the average noise (in dBm) among receivers.
4225 */
4226 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4227 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4228 {
4229 int i, total, nbant, noise;
4230
4231 total = nbant = noise = 0;
4232 for (i = 0; i < 3; i++) {
4233 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4234 if (noise) {
4235 total += noise;
4236 nbant++;
4237 }
4238 }
4239
4240 /* There should be at least one antenna but check anyway. */
4241 return (nbant == 0) ? -127 : (total / nbant) - 107;
4242 }
4243
#if 0
/*
 * Disabled: CCMP replay detection for hardware-decrypted frames,
 * carried over from the OpenBSD driver. It appears to use OpenBSD-only
 * net80211 interfaces (ieee80211_get_rxkey, k_rsc, ieee80211_rxinfo)
 * and has not been ported to FreeBSD's crypto framework yet; kept for
 * reference until hardware decrypt support is completed.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is be set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0]		|
	    (uint64_t)ivp[1] <<	 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4303
4304 static int
iwx_rx_hwdecrypt(struct iwx_softc * sc,struct mbuf * m,uint32_t rx_pkt_status)4305 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4306 {
4307 struct ieee80211_frame *wh;
4308 int ret = 0;
4309 uint8_t type, subtype;
4310
4311 wh = mtod(m, struct ieee80211_frame *);
4312
4313 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4314 if (type == IEEE80211_FC0_TYPE_CTL) {
4315 return 0;
4316 }
4317
4318 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4319 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4320 return 0;
4321 }
4322
4323
4324 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4325 IEEE80211_FC0_TYPE_CTL)
4326 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4327 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4328 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4329 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4330 ret = 1;
4331 goto out;
4332 }
4333 /* Check whether decryption was successful or not. */
4334 if ((rx_pkt_status &
4335 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4336 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4337 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4338 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4339 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4340 ret = 1;
4341 goto out;
4342 }
4343 }
4344 out:
4345 return ret;
4346 }
4347
/*
 * Deliver a received frame to net80211: resolve the hardware channel
 * index to an ic_channels entry, look up the transmitting node, fill
 * in radiotap capture fields if a sniffer is attached, and pass the
 * mbuf up via ieee80211_input_mimo(_all). The driver lock is dropped
 * around the net80211 input call. Consumes the mbuf.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Map the IEEE channel number onto an ic_channels array index. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0 /* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	/* Fill in radiotap capture fields only if someone is listening. */
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* rate_n_flags layout changed in format version 2. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Translate firmware rate codes to 500kb/s units. */
			switch (rate) {
			/* CCK rates. */
			case 10: tap->wr_rate = 2; break;
			case 20: tap->wr_rate = 4; break;
			case 55: tap->wr_rate = 11; break;
			case 110: tap->wr_rate = 22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate = 12; break;
			case 0xf: tap->wr_rate = 18; break;
			case 0x5: tap->wr_rate = 24; break;
			case 0x7: tap->wr_rate = 36; break;
			case 0x9: tap->wr_rate = 48; break;
			case 0xb: tap->wr_rate = 72; break;
			case 0x1: tap->wr_rate = 96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default: tap->wr_rate = 0;
			}
			/* NOTE: deliberately overrides the switch above. */
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may re-enter the driver; drop our lock. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4483
4484 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4485 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4486 size_t maxlen)
4487 {
4488 struct ieee80211com *ic = &sc->sc_ic;
4489 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4490 struct ieee80211_node *ni = vap->iv_bss;
4491 struct ieee80211_key *k;
4492 struct ieee80211_rx_stats rxs;
4493 struct iwx_rx_mpdu_desc *desc;
4494 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4495 int rssi;
4496 uint8_t chanidx;
4497 uint16_t phy_info;
4498 size_t desc_size;
4499 int pad = 0;
4500
4501 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4502 desc_size = sizeof(*desc);
4503 else
4504 desc_size = IWX_RX_DESC_SIZE_V1;
4505
4506 if (maxlen < desc_size) {
4507 m_freem(m);
4508 return; /* drop */
4509 }
4510
4511 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4512
4513 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4514 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4515 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4516 m_freem(m);
4517 return; /* drop */
4518 }
4519
4520 len = le16toh(desc->mpdu_len);
4521 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4522 /* Allow control frames in monitor mode. */
4523 if (len < sizeof(struct ieee80211_frame_cts)) {
4524 m_freem(m);
4525 return;
4526 }
4527
4528 } else if (len < sizeof(struct ieee80211_frame)) {
4529 m_freem(m);
4530 return;
4531 }
4532 if (len > maxlen - desc_size) {
4533 m_freem(m);
4534 return;
4535 }
4536
4537 // TODO: arithmetic on a pointer to void is a GNU extension
4538 m->m_data = (char *)pktdata + desc_size;
4539 m->m_pkthdr.len = m->m_len = len;
4540
4541 /* Account for padding following the frame header. */
4542 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4543 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4544 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4545 if (type == IEEE80211_FC0_TYPE_CTL) {
4546 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4547 case IEEE80211_FC0_SUBTYPE_CTS:
4548 hdrlen = sizeof(struct ieee80211_frame_cts);
4549 break;
4550 case IEEE80211_FC0_SUBTYPE_ACK:
4551 hdrlen = sizeof(struct ieee80211_frame_ack);
4552 break;
4553 default:
4554 hdrlen = sizeof(struct ieee80211_frame_min);
4555 break;
4556 }
4557 } else
4558 hdrlen = ieee80211_hdrsize(wh);
4559
4560 if ((le16toh(desc->status) &
4561 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4562 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4563 // CCMP header length
4564 hdrlen += 8;
4565 }
4566
4567 memmove(m->m_data + 2, m->m_data, hdrlen);
4568 m_adj(m, 2);
4569
4570 }
4571
4572 if ((le16toh(desc->status) &
4573 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4574 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4575 pad = 1;
4576 }
4577
4578 // /*
4579 // * Hardware de-aggregates A-MSDUs and copies the same MAC header
4580 // * in place for each subframe. But it leaves the 'A-MSDU present'
4581 // * bit set in the frame header. We need to clear this bit ourselves.
4582 // * (XXX This workaround is not required on AX200/AX201 devices that
4583 // * have been tested by me, but it's unclear when this problem was
4584 // * fixed in the hardware. It definitely affects the 9k generation.
4585 // * Leaving this in place for now since some 9k/AX200 hybrids seem
4586 // * to exist that we may eventually add support for.)
4587 // *
4588 // * And we must allow the same CCMP PN for subframes following the
4589 // * first subframe. Otherwise they would be discarded as replays.
4590 // */
4591 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4592 DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
4593 // struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4594 // uint8_t subframe_idx = (desc->amsdu_info &
4595 // IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4596 // if (subframe_idx > 0)
4597 // rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4598 // if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4599 // m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4600 // struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4601 // struct ieee80211_qosframe_addr4 *);
4602 // qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4603 // } else if (ieee80211_has_qos(wh) &&
4604 // m->m_len >= sizeof(struct ieee80211_qosframe)) {
4605 // struct ieee80211_qosframe *qwh = mtod(m,
4606 // struct ieee80211_qosframe *);
4607 // qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4608 // }
4609 }
4610
4611 /*
4612 * Verify decryption before duplicate detection. The latter uses
4613 * the TID supplied in QoS frame headers and this TID is implicitly
4614 * verified as part of the CCMP nonce.
4615 */
4616 k = ieee80211_crypto_get_txkey(ni, m);
4617 if (k != NULL &&
4618 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4619 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4620 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4621 m_freem(m);
4622 return;
4623 }
4624
4625 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4626 rate_n_flags = le32toh(desc->v3.rate_n_flags);
4627 chanidx = desc->v3.channel;
4628 device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4629 } else {
4630 rate_n_flags = le32toh(desc->v1.rate_n_flags);
4631 chanidx = desc->v1.channel;
4632 device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4633 }
4634
4635 phy_info = le16toh(desc->phy_info);
4636
4637 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4638 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4639 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
4640
4641 memset(&rxs, 0, sizeof(rxs));
4642 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4643 rxs.r_flags |= IEEE80211_R_BAND;
4644 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4645 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4646
4647 rxs.c_ieee = chanidx;
4648 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4649 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4650 rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4651 rxs.c_rx_tsf = device_timestamp;
4652 rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
4653 if (rxs.c_chain != 0)
4654 rxs.r_flags |= IEEE80211_R_C_CHAIN;
4655
4656 /* rssi is in 1/2db units */
4657 rxs.c_rssi = rssi * 2;
4658 rxs.c_nf = sc->sc_noise;
4659
4660 if (pad) {
4661 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4662 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4663 }
4664
4665 if (ieee80211_add_rx_params(m, &rxs) == 0) {
4666 printf("%s: ieee80211_add_rx_params failed\n", __func__);
4667 return;
4668 }
4669
4670 ieee80211_add_rx_params(m, &rxs);
4671
4672 #if 0
4673 if (iwx_rx_reorder(sc, m, chanidx, desc,
4674 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4675 rate_n_flags, device_timestamp, &rxi, ml))
4676 return;
4677 #endif
4678
4679 if (pad) {
4680 #define TRIM 8
4681 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4682 hdrlen = ieee80211_hdrsize(wh);
4683 memmove(m->m_data + TRIM, m->m_data, hdrlen);
4684 m_adj(m, TRIM);
4685 #undef TRIM
4686 }
4687
4688 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4689 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4690 rate_n_flags, device_timestamp, rssi);
4691 }
4692
4693 static void
iwx_clear_tx_desc(struct iwx_softc * sc,struct iwx_tx_ring * ring,int idx)4694 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4695 {
4696 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4697 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4698 int i;
4699
4700 /* First TB is never cleared - it is bidirectional DMA data. */
4701 for (i = 1; i < num_tbs; i++) {
4702 struct iwx_tfh_tb *tb = &desc->tbs[i];
4703 memset(tb, 0, sizeof(*tb));
4704 }
4705 desc->num_tbs = htole16(1);
4706
4707 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4708 BUS_DMASYNC_PREWRITE);
4709 }
4710
4711 static void
iwx_txd_done(struct iwx_softc * sc,struct iwx_tx_ring * ring,struct iwx_tx_data * txd)4712 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
4713 struct iwx_tx_data *txd)
4714 {
4715 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
4716 bus_dmamap_unload(ring->data_dmat, txd->map);
4717
4718 ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
4719 txd->m = NULL;
4720 txd->in = NULL;
4721 }
4722
4723 static void
iwx_txq_advance(struct iwx_softc * sc,struct iwx_tx_ring * ring,uint16_t idx)4724 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
4725 {
4726 struct iwx_tx_data *txd;
4727
4728 while (ring->tail_hw != idx) {
4729 txd = &ring->data[ring->tail];
4730 if (txd->m != NULL) {
4731 iwx_clear_tx_desc(sc, ring, ring->tail);
4732 iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
4733 iwx_txd_done(sc, ring, txd);
4734 ring->queued--;
4735 if (ring->queued < 0)
4736 panic("caught negative queue count");
4737 }
4738 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4739 ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
4740 }
4741 }
4742
/*
 * Handle a TX command response from firmware: validate the response,
 * update interface counters for the completed frame, and advance the
 * TX ring past all frames the firmware has finished with (the SSN in
 * the response is a ring index on this hardware).
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	/* NOTE(review): txd->m may conceivably be NULL if this slot was
	 * already reclaimed; the counter code below dereferences m without
	 * a check — confirm the firmware never double-reports a slot. */
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* A valid response means the queue is not stuck; reset watchdog. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	/* The SSN word follows the variable-length status array;
	 * memcpy avoids a potentially misaligned direct load. */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4804
4805 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4806 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4807 {
4808 if (ring->queued < iwx_lomark) {
4809 sc->qfullmsk &= ~(1 << ring->qid);
4810 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4811 /*
4812 * Well, we're in interrupt context, but then again
4813 * I guess net80211 does all sorts of stunts in
4814 * interrupt context, so maybe this is no biggie.
4815 */
4816 iwx_start(sc);
4817 }
4818 }
4819 }
4820
4821 static void
iwx_rx_compressed_ba(struct iwx_softc * sc,struct iwx_rx_packet * pkt)4822 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4823 {
4824 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4825 struct ieee80211com *ic = &sc->sc_ic;
4826 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4827 struct iwx_node *in = IWX_NODE(vap->iv_bss);
4828 struct ieee80211_node *ni = &in->in_ni;
4829 struct iwx_tx_ring *ring;
4830 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4831 int qid;
4832
4833 // if (ic->ic_state != IEEE80211_S_RUN)
4834 // return;
4835
4836 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4837 return;
4838
4839 if (ba_res->sta_id != IWX_STATION_ID)
4840 return;
4841
4842 in = (void *)ni;
4843
4844 tfd_cnt = le16toh(ba_res->tfd_cnt);
4845 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4846 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4847 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4848 sizeof(ba_res->tfd[0]) * tfd_cnt))
4849 return;
4850
4851 for (i = 0; i < tfd_cnt; i++) {
4852 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4853 uint8_t tid;
4854
4855 tid = ba_tfd->tid;
4856 if (tid >= nitems(sc->aggqid))
4857 continue;
4858
4859 qid = sc->aggqid[tid];
4860 if (qid != htole16(ba_tfd->q_num))
4861 continue;
4862
4863 ring = &sc->txq[qid];
4864
4865 #if 0
4866 ba = &ni->ni_tx_ba[tid];
4867 if (ba->ba_state != IEEE80211_BA_AGREED)
4868 continue;
4869 #endif
4870 idx = le16toh(ba_tfd->tfd_index);
4871 sc->sc_tx_timer[qid] = 0;
4872 iwx_txq_advance(sc, ring, idx);
4873 iwx_clear_oactive(sc, ring);
4874 }
4875 }
4876
4877 static void
iwx_rx_bmiss(struct iwx_softc * sc,struct iwx_rx_packet * pkt,struct iwx_rx_data * data)4878 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4879 struct iwx_rx_data *data)
4880 {
4881 struct ieee80211com *ic = &sc->sc_ic;
4882 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4883 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4884 uint32_t missed;
4885
4886 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4887 (vap->iv_state != IEEE80211_S_RUN))
4888 return;
4889
4890 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4891 BUS_DMASYNC_POSTREAD);
4892
4893 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4894 if (missed > vap->iv_bmissthreshold) {
4895 ieee80211_beacon_miss(ic);
4896 }
4897
4898 }
4899
4900 static int
iwx_binding_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action)4901 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4902 {
4903 struct iwx_binding_cmd cmd;
4904 struct ieee80211com *ic = &sc->sc_ic;
4905 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4906 struct iwx_vap *ivp = IWX_VAP(vap);
4907 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
4908 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4909 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4910 uint32_t status;
4911
4912 if (action == IWX_FW_CTXT_ACTION_ADD && active)
4913 panic("binding already added");
4914 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4915 panic("binding already removed");
4916
4917 if (phyctxt == NULL) /* XXX race with iwx_stop() */
4918 return EINVAL;
4919
4920 memset(&cmd, 0, sizeof(cmd));
4921
4922 cmd.id_and_color
4923 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4924 cmd.action = htole32(action);
4925 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4926
4927 cmd.macs[0] = htole32(mac_id);
4928 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4929 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4930
4931 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4932 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4933 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4934 else
4935 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4936
4937 status = 0;
4938 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4939 &cmd, &status);
4940 if (err == 0 && status != 0)
4941 err = EIO;
4942
4943 return err;
4944 }
4945
4946 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4947 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4948 {
4949 int ctlchan = ieee80211_chan2ieee(ic, chan);
4950 int midpoint = chan->ic_vht_ch_freq1;
4951
4952 /*
4953 * The FW is expected to check the control channel position only
4954 * when in HT/VHT and the channel width is not 20MHz. Return
4955 * this value as the default one:
4956 */
4957 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4958
4959 switch (ctlchan - midpoint) {
4960 case -6:
4961 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
4962 break;
4963 case -2:
4964 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4965 break;
4966 case 2:
4967 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4968 break;
4969 case 6:
4970 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
4971 break;
4972 default:
4973 break;
4974 }
4975
4976 return pos;
4977 }
4978
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	/*
	 * Build and send a v3/v4 PHY context command using the larger
	 * "ultra high band" fw_channel_info layout.
	 * NOTE(review): 'sco' and 'vht_chan_width' are currently unused
	 * here; channel width/control position are derived from the
	 * channel flags instead — confirm against callers.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* 2GHz (or non-CDB firmware) goes to the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Channel width and control channel position from channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Older command versions carry the Rx chain configuration inline
	 * (newer firmware takes it via IWX_RLC_CONFIG_CMD v2 instead).
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5032
#if 0
/*
 * Non-UHB variant of the v3/v4 PHY context command, using the smaller
 * fw_channel_info layout.  Currently disabled: iwx_phy_ctxt_cmd() only
 * supports devices advertising IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS.
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5094
5095 static int
iwx_phy_ctxt_cmd(struct iwx_softc * sc,struct iwx_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint32_t apply_time,uint8_t sco,uint8_t vht_chan_width)5096 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5097 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5098 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5099 {
5100 int cmdver;
5101
5102 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5103 if (cmdver != 3 && cmdver != 4) {
5104 printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5105 DEVNAME(sc));
5106 return ENOTSUP;
5107 }
5108
5109 /*
5110 * Intel increased the size of the fw_channel_info struct and neglected
5111 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5112 * member in the middle.
5113 * To keep things simple we use a separate function to handle the larger
5114 * variant of the phy context command.
5115 */
5116 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5117 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5118 chains_dynamic, action, sco, vht_chan_width, cmdver);
5119 } else
5120 panic("Unsupported old hardware contact thj@");
5121
5122 #if 0
5123 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5124 action, sco, vht_chan_width, cmdver);
5125 #endif
5126 }
5127
5128 static int
iwx_send_cmd(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5129 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5130 {
5131 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5132 struct iwx_tfh_tfd *desc;
5133 struct iwx_tx_data *txdata;
5134 struct iwx_device_cmd *cmd;
5135 struct mbuf *m;
5136 bus_addr_t paddr;
5137 uint64_t addr;
5138 int err = 0, i, paylen, off/*, s*/;
5139 int idx, code, async, group_id;
5140 size_t hdrlen, datasz;
5141 uint8_t *data;
5142 int generation = sc->sc_generation;
5143 bus_dma_segment_t seg[10];
5144 int nsegs;
5145
5146 code = hcmd->id;
5147 async = hcmd->flags & IWX_CMD_ASYNC;
5148 idx = ring->cur;
5149
5150 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5151 paylen += hcmd->len[i];
5152 }
5153
5154 /* If this command waits for a response, allocate response buffer. */
5155 hcmd->resp_pkt = NULL;
5156 if (hcmd->flags & IWX_CMD_WANT_RESP) {
5157 uint8_t *resp_buf;
5158 KASSERT(!async, ("async command want response"));
5159 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5160 ("wrong pkt len 1"));
5161 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5162 ("wrong pkt len 2"));
5163 if (sc->sc_cmd_resp_pkt[idx] != NULL)
5164 return ENOSPC;
5165 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5166 M_NOWAIT | M_ZERO);
5167 if (resp_buf == NULL)
5168 return ENOMEM;
5169 sc->sc_cmd_resp_pkt[idx] = resp_buf;
5170 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5171 } else {
5172 sc->sc_cmd_resp_pkt[idx] = NULL;
5173 }
5174
5175 desc = &ring->desc[idx];
5176 txdata = &ring->data[idx];
5177
5178 /*
5179 * XXX Intel inside (tm)
5180 * Firmware API versions >= 50 reject old-style commands in
5181 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5182 * that such commands were in the LONG_GROUP instead in order
5183 * for firmware to accept them.
5184 */
5185 if (iwx_cmd_groupid(code) == 0) {
5186 code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5187 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5188 } else
5189 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5190
5191 group_id = iwx_cmd_groupid(code);
5192
5193 hdrlen = sizeof(cmd->hdr_wide);
5194 datasz = sizeof(cmd->data_wide);
5195
5196 if (paylen > datasz) {
5197 /* Command is too large to fit in pre-allocated space. */
5198 size_t totlen = hdrlen + paylen;
5199 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5200 printf("%s: firmware command too long (%zd bytes)\n",
5201 DEVNAME(sc), totlen);
5202 err = EINVAL;
5203 goto out;
5204 }
5205 if (totlen > IWX_RBUF_SIZE)
5206 panic("totlen > IWX_RBUF_SIZE");
5207 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5208 if (m == NULL) {
5209 printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5210 DEVNAME(sc), IWX_RBUF_SIZE);
5211 err = ENOMEM;
5212 goto out;
5213 }
5214 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5215 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5216 seg, &nsegs, BUS_DMA_NOWAIT);
5217 if (nsegs > 20)
5218 panic("nsegs > 20");
5219 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5220 if (err) {
5221 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5222 DEVNAME(sc), totlen);
5223 m_freem(m);
5224 goto out;
5225 }
5226 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5227 cmd = mtod(m, struct iwx_device_cmd *);
5228 paddr = seg[0].ds_addr;
5229 } else {
5230 cmd = &ring->cmd[idx];
5231 paddr = txdata->cmd_paddr;
5232 }
5233
5234 memset(cmd, 0, sizeof(*cmd));
5235 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5236 cmd->hdr_wide.group_id = group_id;
5237 cmd->hdr_wide.qid = ring->qid;
5238 cmd->hdr_wide.idx = idx;
5239 cmd->hdr_wide.length = htole16(paylen);
5240 cmd->hdr_wide.version = iwx_cmd_version(code);
5241 data = cmd->data_wide;
5242
5243 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5244 if (hcmd->len[i] == 0)
5245 continue;
5246 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5247 off += hcmd->len[i];
5248 }
5249 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5250
5251 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5252 addr = htole64(paddr);
5253 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5254 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5255 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5256 paylen));
5257 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5258 IWX_FIRST_TB_SIZE);
5259 addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5260 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5261 desc->num_tbs = htole16(2);
5262 } else
5263 desc->num_tbs = htole16(1);
5264
5265 if (paylen > datasz) {
5266 bus_dmamap_sync(ring->data_dmat, txdata->map,
5267 BUS_DMASYNC_PREWRITE);
5268 } else {
5269 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5270 BUS_DMASYNC_PREWRITE);
5271 }
5272 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5273 BUS_DMASYNC_PREWRITE);
5274
5275 /* Kick command ring. */
5276 ring->queued++;
5277 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5278 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5279 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5280 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5281
5282 if (!async) {
5283 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5284 if (err == 0) {
5285 /* if hardware is no longer up, return error */
5286 if (generation != sc->sc_generation) {
5287 err = ENXIO;
5288 goto out;
5289 }
5290
5291 /* Response buffer will be freed in iwx_free_resp(). */
5292 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5293 sc->sc_cmd_resp_pkt[idx] = NULL;
5294 } else if (generation == sc->sc_generation) {
5295 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5296 sc->sc_cmd_resp_pkt[idx] = NULL;
5297 }
5298 }
5299 out:
5300 return err;
5301 }
5302
5303 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5304 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5305 uint16_t len, const void *data)
5306 {
5307 struct iwx_host_cmd cmd = {
5308 .id = id,
5309 .len = { len, },
5310 .data = { data, },
5311 .flags = flags,
5312 };
5313
5314 return iwx_send_cmd(sc, &cmd);
5315 }
5316
5317 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5318 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5319 uint32_t *status)
5320 {
5321 struct iwx_rx_packet *pkt;
5322 struct iwx_cmd_response *resp;
5323 int err, resp_len;
5324
5325 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5326 cmd->flags |= IWX_CMD_WANT_RESP;
5327 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5328
5329 err = iwx_send_cmd(sc, cmd);
5330 if (err)
5331 return err;
5332
5333 pkt = cmd->resp_pkt;
5334 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5335 return EIO;
5336
5337 resp_len = iwx_rx_packet_payload_len(pkt);
5338 if (resp_len != sizeof(*resp)) {
5339 iwx_free_resp(sc, cmd);
5340 return EIO;
5341 }
5342
5343 resp = (void *)pkt->data;
5344 *status = le32toh(resp->status);
5345 iwx_free_resp(sc, cmd);
5346 return err;
5347 }
5348
5349 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5350 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5351 const void *data, uint32_t *status)
5352 {
5353 struct iwx_host_cmd cmd = {
5354 .id = id,
5355 .len = { len, },
5356 .data = { data, },
5357 };
5358
5359 return iwx_send_cmd_status(sc, &cmd, status);
5360 }
5361
5362 static void
iwx_free_resp(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5363 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5364 {
5365 KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
5366 ("hcmd flags !IWX_CMD_WANT_RESP"));
5367 free(hcmd->resp_pkt, M_DEVBUF);
5368 hcmd->resp_pkt = NULL;
5369 }
5370
5371 static void
iwx_cmd_done(struct iwx_softc * sc,int qid,int idx,int code)5372 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5373 {
5374 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5375 struct iwx_tx_data *data;
5376
5377 if (qid != IWX_DQA_CMD_QUEUE) {
5378 return; /* Not a command ack. */
5379 }
5380
5381 data = &ring->data[idx];
5382
5383 if (data->m != NULL) {
5384 bus_dmamap_sync(ring->data_dmat, data->map,
5385 BUS_DMASYNC_POSTWRITE);
5386 bus_dmamap_unload(ring->data_dmat, data->map);
5387 m_freem(data->m);
5388 data->m = NULL;
5389 }
5390 wakeup(&ring->desc[idx]);
5391
5392 DPRINTF(("%s: command 0x%x done\n", __func__, code));
5393 if (ring->queued == 0) {
5394 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5395 DEVNAME(sc), code));
5396 } else if (ring->queued > 0)
5397 ring->queued--;
5398 }
5399
5400 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5401 iwx_fw_rateidx_ofdm(uint8_t rval)
5402 {
5403 /* Firmware expects indices which match our 11a rate set. */
5404 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5405 int i;
5406
5407 for (i = 0; i < rs->rs_nrates; i++) {
5408 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5409 return i;
5410 }
5411
5412 return 0;
5413 }
5414
5415 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5416 iwx_fw_rateidx_cck(uint8_t rval)
5417 {
5418 /* Firmware expects indices which match our 11b rate set. */
5419 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5420 int i;
5421
5422 for (i = 0; i < rs->rs_nrates; i++) {
5423 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5424 return i;
5425 }
5426
5427 return 0;
5428 }
5429
5430 static int
iwx_min_basic_rate(struct ieee80211com * ic)5431 iwx_min_basic_rate(struct ieee80211com *ic)
5432 {
5433 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5434 struct ieee80211_node *ni = vap->iv_bss;
5435 struct ieee80211_rateset *rs = &ni->ni_rates;
5436 struct ieee80211_channel *c = ni->ni_chan;
5437 int i, min, rval;
5438
5439 min = -1;
5440
5441 if (c == IEEE80211_CHAN_ANYC) {
5442 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5443 return -1;
5444 }
5445
5446 for (i = 0; i < rs->rs_nrates; i++) {
5447 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5448 continue;
5449 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5450 if (min == -1)
5451 min = rval;
5452 else if (rval < min)
5453 min = rval;
5454 }
5455
5456 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5457 if (min == -1)
5458 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5459
5460 return min;
5461 }
5462
5463 /*
5464 * Determine the Tx command flags and Tx rate+flags to use.
5465 * Return the selected Tx rate.
5466 */
5467 static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc * sc,struct iwx_node * in,struct ieee80211_frame * wh,uint16_t * flags,uint32_t * rate_n_flags,struct mbuf * m)5468 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5469 struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5470 struct mbuf *m)
5471 {
5472 struct ieee80211com *ic = &sc->sc_ic;
5473 struct ieee80211_node *ni = &in->in_ni;
5474 struct ieee80211_rateset *rs = &ni->ni_rates;
5475 const struct iwx_rate *rinfo = NULL;
5476 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5477 int ridx = iwx_min_basic_rate(ic);
5478 int min_ridx, rate_flags;
5479 uint8_t rval;
5480
5481 /* We're in the process of clearing the node, no channel already */
5482 if (ridx == -1)
5483 return NULL;
5484
5485 min_ridx = iwx_rval2ridx(ridx);
5486
5487 *flags = 0;
5488
5489 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5490 type != IEEE80211_FC0_TYPE_DATA) {
5491 /* for non-data, use the lowest supported rate */
5492 ridx = min_ridx;
5493 *flags |= IWX_TX_FLAGS_CMD_RATE;
5494 } else if (ni->ni_flags & IEEE80211_NODE_HT) {
5495 ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5496 & ~IEEE80211_RATE_MCS];
5497 } else {
5498 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5499 & IEEE80211_RATE_VAL);
5500 ridx = iwx_rval2ridx(rval);
5501 if (ridx < min_ridx)
5502 ridx = min_ridx;
5503 }
5504
5505 if (m->m_flags & M_EAPOL)
5506 *flags |= IWX_TX_FLAGS_HIGH_PRI;
5507
5508 rinfo = &iwx_rates[ridx];
5509
5510 /*
5511 * Do not fill rate_n_flags if firmware controls the Tx rate.
5512 * For data frames we rely on Tx rate scaling in firmware by default.
5513 */
5514 if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5515 *rate_n_flags = 0;
5516 return rinfo;
5517 }
5518
5519 /*
5520 * Forcing a CCK/OFDM legacy rate is important for management frames.
5521 * Association will only succeed if we do this correctly.
5522 */
5523
5524 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5525 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5526 rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5527 if (IWX_RIDX_IS_CCK(ridx)) {
5528 if (sc->sc_rate_n_flags_version >= 2)
5529 rate_flags |= IWX_RATE_MCS_CCK_MSK;
5530 else
5531 rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5532 } else if (sc->sc_rate_n_flags_version >= 2)
5533 rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5534
5535 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5536 & IEEE80211_RATE_VAL);
5537 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5538 rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5539
5540 if (sc->sc_rate_n_flags_version >= 2) {
5541 if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5542 rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5543 IWX_RATE_LEGACY_RATE_MSK);
5544 } else {
5545 rate_flags |= (iwx_fw_rateidx_cck(rval) &
5546 IWX_RATE_LEGACY_RATE_MSK);
5547 }
5548 } else
5549 rate_flags |= rinfo->plcp;
5550
5551 *rate_n_flags = rate_flags;
5552 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5553 __func__, __LINE__,*flags);
5554 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5555 __func__, __LINE__, *rate_n_flags);
5556
5557 if (sc->sc_debug & IWX_DEBUG_TXRATE)
5558 print_ratenflags(__func__, __LINE__,
5559 *rate_n_flags, sc->sc_rate_n_flags_version);
5560
5561 return rinfo;
5562 }
5563
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	/*
	 * Update the byte-count table the hardware scheduler reads for
	 * TFD slot 'idx' of this queue: the frame length plus the number
	 * of 64-byte SRAM chunks needed to fetch the TFD itself.
	 */
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
	    num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Make the table update visible to the device before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
5599
5600 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5601 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5602 {
5603 struct ieee80211com *ic = &sc->sc_ic;
5604 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5605 struct iwx_node *in = (void *)ni;
5606 struct iwx_tx_ring *ring;
5607 struct iwx_tx_data *data;
5608 struct iwx_tfh_tfd *desc;
5609 struct iwx_device_cmd *cmd;
5610 struct ieee80211_frame *wh;
5611 struct ieee80211_key *k = NULL;
5612 const struct iwx_rate *rinfo;
5613 uint64_t paddr;
5614 u_int hdrlen;
5615 uint32_t rate_n_flags;
5616 uint16_t num_tbs, flags, offload_assist = 0;
5617 uint8_t type, subtype;
5618 int i, totlen, err, pad, qid;
5619 #define IWM_MAX_SCATTER 20
5620 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5621 int nsegs;
5622 struct mbuf *m1;
5623 size_t txcmd_size;
5624
5625 wh = mtod(m, struct ieee80211_frame *);
5626 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5627 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5628 hdrlen = ieee80211_anyhdrsize(wh);
5629
5630 qid = sc->first_data_qid;
5631
5632 /* Put QoS frames on the data queue which maps to their TID. */
5633 if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
5634 uint16_t qos = ieee80211_gettid(wh);
5635 uint8_t tid = qos & IEEE80211_QOS_TID;
5636 #if 0
5637 /*
5638 * XXX-THJ: TODO when we enable ba we need to manage the
5639 * mappings
5640 */
5641 struct ieee80211_tx_ba *ba;
5642 ba = &ni->ni_tx_ba[tid];
5643
5644 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5645 type == IEEE80211_FC0_TYPE_DATA &&
5646 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5647 subtype != IEEE80211_FC0_SUBTYPE_BAR &&
5648 sc->aggqid[tid] != 0 /*&&
5649 ba->ba_state == IEEE80211_BA_AGREED*/) {
5650 qid = sc->aggqid[tid];
5651 #else
5652 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5653 type == IEEE80211_FC0_TYPE_DATA &&
5654 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5655 sc->aggqid[tid] != 0) {
5656 qid = sc->aggqid[tid];
5657 #endif
5658 }
5659 }
5660
5661 ring = &sc->txq[qid];
5662 desc = &ring->desc[ring->cur];
5663 memset(desc, 0, sizeof(*desc));
5664 data = &ring->data[ring->cur];
5665
5666 cmd = &ring->cmd[ring->cur];
5667 cmd->hdr.code = IWX_TX_CMD;
5668 cmd->hdr.flags = 0;
5669 cmd->hdr.qid = ring->qid;
5670 cmd->hdr.idx = ring->cur;
5671
5672 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5673 if (rinfo == NULL)
5674 return EINVAL;
5675
5676 /* Offloaded sequence number assignment; non-AMPDU case */
5677 if ((m->m_flags & M_AMPDU_MPDU) == 0)
5678 ieee80211_output_seqno_assign(ni, -1, m);
5679
5680 /* Radiotap */
5681 if (ieee80211_radiotap_active_vap(vap)) {
5682 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5683
5684 tap->wt_flags = 0;
5685 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5686 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5687 tap->wt_rate = rinfo->rate;
5688 if (k != NULL)
5689 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5690 ieee80211_radiotap_tx(vap, m);
5691 }
5692
5693 /* Encrypt - CCMP via direct HW path, TKIP/WEP indirected openbsd-style for now */
5694 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5695 k = ieee80211_crypto_get_txkey(ni, m);
5696 if (k == NULL) {
5697 printf("%s: k is NULL!\n", __func__);
5698 m_freem(m);
5699 return (ENOBUFS);
5700 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5701 k->wk_keytsc++;
5702 } else {
5703 k->wk_cipher->ic_encap(k, m);
5704
5705 /* 802.11 headers may have moved */
5706 wh = mtod(m, struct ieee80211_frame *);
5707 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5708 }
5709 } else
5710 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5711
5712 totlen = m->m_pkthdr.len;
5713
5714 if (hdrlen & 3) {
5715 /* First segment length must be a multiple of 4. */
5716 pad = 4 - (hdrlen & 3);
5717 offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5718 } else
5719 pad = 0;
5720
5721 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5722 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5723 memset(tx, 0, sizeof(*tx));
5724 tx->len = htole16(totlen);
5725 tx->offload_assist = htole32(offload_assist);
5726 tx->flags = htole16(flags);
5727 tx->rate_n_flags = htole32(rate_n_flags);
5728 memcpy(tx->hdr, wh, hdrlen);
5729 txcmd_size = sizeof(*tx);
5730 } else {
5731 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5732 memset(tx, 0, sizeof(*tx));
5733 tx->len = htole16(totlen);
5734 tx->offload_assist = htole16(offload_assist);
5735 tx->flags = htole32(flags);
5736 tx->rate_n_flags = htole32(rate_n_flags);
5737 memcpy(tx->hdr, wh, hdrlen);
5738 txcmd_size = sizeof(*tx);
5739 }
5740
5741 /* Trim 802.11 header. */
5742 m_adj(m, hdrlen);
5743
5744 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5745 &nsegs, BUS_DMA_NOWAIT);
5746 if (err && err != EFBIG) {
5747 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5748 m_freem(m);
5749 return err;
5750 }
5751 if (err) {
5752 /* Too many DMA segments, linearize mbuf. */
5753 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5754 if (m1 == NULL) {
5755 printf("%s: could not defrag mbufs\n", __func__);
5756 m_freem(m);
5757 return (ENOBUFS);
5758 }
5759 m = m1;
5760 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5761 segs, &nsegs, BUS_DMA_NOWAIT);
5762 if (err) {
5763 printf("%s: can't map mbuf (error %d)\n", __func__,
5764 err);
5765 m_freem(m);
5766 return (err);
5767 }
5768 }
5769 data->m = m;
5770 data->in = in;
5771
5772 /* Fill TX descriptor. */
5773 num_tbs = 2 + nsegs;
5774 desc->num_tbs = htole16(num_tbs);
5775
5776 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5777 paddr = htole64(data->cmd_paddr);
5778 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5779 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5780 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5781 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5782 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5783 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5784 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5785
5786 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5787 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5788
5789 /* Other DMA segments are for data payload. */
5790 for (i = 0; i < nsegs; i++) {
5791 seg = &segs[i];
5792 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5793 paddr = htole64(seg->ds_addr);
5794 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5795 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5796 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5797 }
5798
5799 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5800 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5801 BUS_DMASYNC_PREWRITE);
5802 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5803 BUS_DMASYNC_PREWRITE);
5804
5805 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5806
5807 /* Kick TX ring. */
5808 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5809 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5810 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5811
5812 /* Mark TX ring as full if we reach a certain threshold. */
5813 if (++ring->queued > iwx_himark) {
5814 sc->qfullmsk |= 1 << ring->qid;
5815 }
5816
5817 sc->sc_tx_timer[ring->qid] = 15;
5818
5819 return 0;
5820 }
5821
/*
 * Flush pending frames for the given TIDs of a station from the
 * firmware Tx path, then catch our Tx ring state up to the firmware's
 * post-flush read pointers reported in the response.
 * Returns 0 on success or an errno value.
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* Sanity check: response must be for the station we asked about. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/* Advance each flushed ring to the firmware's new read pointer. */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		/* Skip queue numbers outside our ring array. */
		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		/* Only touch the ring if the TID matches what we track. */
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	/* Must free the response buffer even on error paths. */
	iwx_free_resp(sc, &hcmd);
	return err;
}
5892
5893 #define IWX_FLUSH_WAIT_MS 2000
5894
5895 static int
5896 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5897 {
5898 struct iwx_add_sta_cmd cmd;
5899 int err;
5900 uint32_t status;
5901
5902 memset(&cmd, 0, sizeof(cmd));
5903 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5904 in->in_color));
5905 cmd.sta_id = IWX_STATION_ID;
5906 cmd.add_modify = IWX_STA_MODE_MODIFY;
5907 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5908 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5909
5910 status = IWX_ADD_STA_SUCCESS;
5911 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5912 sizeof(cmd), &cmd, &status);
5913 if (err) {
5914 printf("%s: could not update sta (error %d)\n",
5915 DEVNAME(sc), err);
5916 return err;
5917 }
5918
5919 switch (status & IWX_ADD_STA_STATUS_MASK) {
5920 case IWX_ADD_STA_SUCCESS:
5921 break;
5922 default:
5923 err = EIO;
5924 printf("%s: Couldn't %s draining for station\n",
5925 DEVNAME(sc), drain ? "enable" : "disable");
5926 break;
5927 }
5928
5929 return err;
5930 }
5931
5932 static int
5933 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5934 {
5935 int err;
5936
5937 IWX_ASSERT_LOCKED(sc);
5938
5939 sc->sc_flags |= IWX_FLAG_TXFLUSH;
5940
5941 err = iwx_drain_sta(sc, in, 1);
5942 if (err)
5943 goto done;
5944
5945 err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5946 if (err) {
5947 printf("%s: could not flush Tx path (error %d)\n",
5948 DEVNAME(sc), err);
5949 goto done;
5950 }
5951
5952 /*
5953 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the
5954 * fc drive rand has has been replaced in OpenBSD.
5955 */
5956
5957 err = iwx_drain_sta(sc, in, 0);
5958 done:
5959 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5960 return err;
5961 }
5962
5963 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
5964
5965 static int
5966 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5967 struct iwx_beacon_filter_cmd *cmd)
5968 {
5969 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5970 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5971 }
5972
5973 static int
5974 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5975 {
5976 struct iwx_beacon_filter_cmd cmd = {
5977 IWX_BF_CMD_CONFIG_DEFAULTS,
5978 .bf_enable_beacon_filter = htole32(1),
5979 .ba_enable_beacon_abort = htole32(enable),
5980 };
5981
5982 if (!sc->sc_bf.bf_enabled)
5983 return 0;
5984
5985 sc->sc_bf.ba_enabled = enable;
5986 return iwx_beacon_filter_send_cmd(sc, &cmd);
5987 }
5988
/*
 * Fill in a MAC power command for the given node.  The keep-alive
 * period is always set, even with power save disabled, because the
 * firmware needs it right after association.
 */
static void
iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_power_cmd *cmd)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int dtim_period, dtim_msec, keep_alive;

	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	/* Fall back to a DTIM period of 1 if the VAP does not know it. */
	if (vap->iv_dtim_period)
		dtim_period = vap->iv_dtim_period;
	else
		dtim_period = 1;

	/*
	 * Regardless of power management state the driver must set
	 * keep alive period. FW will use it for sending keep alive NDPs
	 * immediately after association. Check that keep alive period
	 * is at least 3 * DTIM.
	 */
	dtim_msec = dtim_period * ni->ni_intval;
	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
	/* Round up to whole seconds; the command field is in seconds. */
	keep_alive = roundup(keep_alive, 1000) / 1000;
	cmd->keep_alive_seconds = htole16(keep_alive);

	/* No power save in monitor mode. */
	if (ic->ic_opmode != IEEE80211_M_MONITOR)
		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
}
6019
6020 static int
6021 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6022 {
6023 int err;
6024 int ba_enable;
6025 struct iwx_mac_power_cmd cmd;
6026
6027 memset(&cmd, 0, sizeof(cmd));
6028
6029 iwx_power_build_cmd(sc, in, &cmd);
6030
6031 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6032 sizeof(cmd), &cmd);
6033 if (err != 0)
6034 return err;
6035
6036 ba_enable = !!(cmd.flags &
6037 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6038 return iwx_update_beacon_abort(sc, in, ba_enable);
6039 }
6040
6041 static int
6042 iwx_power_update_device(struct iwx_softc *sc)
6043 {
6044 struct iwx_device_power_cmd cmd = { };
6045 struct ieee80211com *ic = &sc->sc_ic;
6046
6047 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6048 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6049
6050 return iwx_send_cmd_pdu(sc,
6051 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6052 }
#if 0
/*
 * Currently compiled out.  Counterpart of iwx_disable_beacon_filter()
 * below: enables beacon filtering in firmware (preserving the current
 * beacon-abort setting) and records that state in sc->sc_bf.
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
6071 static int
6072 iwx_disable_beacon_filter(struct iwx_softc *sc)
6073 {
6074 struct iwx_beacon_filter_cmd cmd;
6075 int err;
6076
6077 memset(&cmd, 0, sizeof(cmd));
6078
6079 err = iwx_beacon_filter_send_cmd(sc, &cmd);
6080 if (err == 0)
6081 sc->sc_bf.bf_enabled = 0;
6082
6083 return err;
6084 }
6085
/*
 * Add (update == 0) or modify (update != 0) the firmware's station
 * entry for this node.  Station flags encode MIMO, 40/80 MHz width,
 * maximum A-MPDU aggregation size and MPDU density derived from the
 * node's HT/VHT capabilities.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* Monitor mode uses a dedicated general-purpose station slot. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only supplied when the station is first added. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwx_mimo_enabled(sc)) {
			/*
			 * VHT nodes are treated as MIMO-capable; HT-only
			 * nodes are probed for an MCS above 7 (i.e. a
			 * second spatial stream) in the HT rate set.
			 */
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Max Rx A-MPDU size advertised in HT parameters. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		/* Clamp to the largest size the firmware flag can express. */
		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Map the node's MPDU density to the firmware encoding. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6206
6207 static int
6208 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6209 {
6210 struct ieee80211com *ic = &sc->sc_ic;
6211 struct iwx_rm_sta_cmd rm_sta_cmd;
6212 int err;
6213
6214 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6215 panic("sta already removed");
6216
6217 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6218 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6219 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6220 else
6221 rm_sta_cmd.sta_id = IWX_STATION_ID;
6222
6223 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6224 &rm_sta_cmd);
6225
6226 return err;
6227 }
6228
/*
 * Full station teardown: flush the Tx path, explicitly disable Tx
 * queues where the firmware API requires it, remove the station, and
 * reset all driver-side aggregation bookkeeping.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			/* Only queues that were actually enabled. */
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all BlockAck session state, Rx and Tx. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6301
/*
 * Translate the net80211 scan channel list into the UMAC scan command's
 * channel configuration array.  Returns the number of entries written,
 * capped at the firmware's advertised scan channel limit.
 * NOTE(review): the chan_nitems and n_ssids parameters are currently
 * unused here; the loop bound comes from sc_capa_n_scan_channels.
 */
static uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc *sc,
    struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
    int n_ssids, uint32_t channel_cfg_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct ieee80211_channel *c;
	uint8_t nchan;
	int j;

	for (nchan = j = 0;
	    j < ss->ss_last &&
	    nchan < sc->sc_capa_n_scan_channels;
	    j++) {
		uint8_t channel_num;

		c = ss->ss_chans[j];
		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		/* Newer firmware wants an explicit band with each channel. */
		if (isset(sc->sc_ucode_api,
		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
			chan->v2.channel_num = channel_num;
			if (IEEE80211_IS_CHAN_2GHZ(c))
				chan->v2.band = IWX_PHY_BAND_24;
			else
				chan->v2.band = IWX_PHY_BAND_5;
			chan->v2.iter_count = 1;
			chan->v2.iter_interval = 0;
		} else {
			chan->v1.channel_num = channel_num;
			chan->v1.iter_count = 1;
			chan->v1.iter_interval = htole16(0);
		}
		chan->flags |= htole32(channel_cfg_flags);
		chan++;
		nchan++;
	}

	return nchan;
}
6342
6343 static int
6344 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6345 {
6346 struct ieee80211com *ic = &sc->sc_ic;
6347 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6348 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6349 struct ieee80211_rateset *rs;
6350 size_t remain = sizeof(preq->buf);
6351 uint8_t *frm, *pos;
6352
6353 memset(preq, 0, sizeof(*preq));
6354
6355 if (remain < sizeof(*wh) + 2)
6356 return ENOBUFS;
6357
6358 /*
6359 * Build a probe request frame. Most of the following code is a
6360 * copy & paste of what is done in net80211.
6361 */
6362 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6363 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6364 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6365 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6366 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6367 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6368 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6369 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6370
6371 frm = (uint8_t *)(wh + 1);
6372 *frm++ = IEEE80211_ELEMID_SSID;
6373 *frm++ = 0;
6374 /* hardware inserts SSID */
6375
6376 /* Tell the firmware where the MAC header is. */
6377 preq->mac_header.offset = 0;
6378 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6379 remain -= frm - (uint8_t *)wh;
6380
6381 /* Fill in 2GHz IEs and tell firmware where they are. */
6382 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6383 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6384 if (remain < 4 + rs->rs_nrates)
6385 return ENOBUFS;
6386 } else if (remain < 2 + rs->rs_nrates)
6387 return ENOBUFS;
6388 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6389 pos = frm;
6390 frm = ieee80211_add_rates(frm, rs);
6391 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6392 frm = ieee80211_add_xrates(frm, rs);
6393 remain -= frm - pos;
6394
6395 if (isset(sc->sc_enabled_capa,
6396 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6397 if (remain < 3)
6398 return ENOBUFS;
6399 *frm++ = IEEE80211_ELEMID_DSPARMS;
6400 *frm++ = 1;
6401 *frm++ = 0;
6402 remain -= 3;
6403 }
6404 preq->band_data[0].len = htole16(frm - pos);
6405
6406 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6407 /* Fill in 5GHz IEs. */
6408 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6409 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6410 if (remain < 4 + rs->rs_nrates)
6411 return ENOBUFS;
6412 } else if (remain < 2 + rs->rs_nrates)
6413 return ENOBUFS;
6414 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6415 pos = frm;
6416 frm = ieee80211_add_rates(frm, rs);
6417 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6418 frm = ieee80211_add_xrates(frm, rs);
6419 preq->band_data[1].len = htole16(frm - pos);
6420 remain -= frm - pos;
6421 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6422 if (remain < 14)
6423 return ENOBUFS;
6424 frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6425 remain -= frm - pos;
6426 preq->band_data[1].len = htole16(frm - pos);
6427 }
6428 }
6429
6430 /* Send 11n IEs on both 2GHz and 5GHz bands. */
6431 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6432 pos = frm;
6433 if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6434 if (remain < 28)
6435 return ENOBUFS;
6436 frm = ieee80211_add_htcap(frm, vap->iv_bss);
6437 /* XXX add WME info? */
6438 remain -= frm - pos;
6439 }
6440
6441 preq->common_data.len = htole16(frm - pos);
6442
6443 return 0;
6444 }
6445
6446 static int
6447 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6448 {
6449 struct iwx_scan_config scan_cfg;
6450 struct iwx_host_cmd hcmd = {
6451 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6452 .len[0] = sizeof(scan_cfg),
6453 .data[0] = &scan_cfg,
6454 .flags = 0,
6455 };
6456 int cmdver;
6457
6458 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6459 printf("%s: firmware does not support reduced scan config\n",
6460 DEVNAME(sc));
6461 return ENOTSUP;
6462 }
6463
6464 memset(&scan_cfg, 0, sizeof(scan_cfg));
6465
6466 /*
6467 * SCAN_CFG version >= 5 implies that the broadcast
6468 * STA ID field is deprecated.
6469 */
6470 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6471 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6472 scan_cfg.bcast_sta_id = 0xff;
6473
6474 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6475 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6476
6477 return iwx_send_cmd(sc, &hcmd);
6478 }
6479
6480 static uint16_t
6481 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6482 {
6483 struct ieee80211com *ic = &sc->sc_ic;
6484 struct ieee80211_scan_state *ss = ic->ic_scan;
6485 uint16_t flags = 0;
6486
6487 if (ss->ss_nssid == 0) {
6488 DPRINTF(("%s: Passive scan started\n", __func__));
6489 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6490 }
6491
6492 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6493 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6494 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6495
6496 return flags;
6497 }
6498
6499 #define IWX_SCAN_DWELL_ACTIVE 10
6500 #define IWX_SCAN_DWELL_PASSIVE 110
6501
6502 /* adaptive dwell max budget time [TU] for full scan */
6503 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6504 /* adaptive dwell max budget time [TU] for directed scan */
6505 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6506 /* adaptive dwell default high band APs number */
6507 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6508 /* adaptive dwell default low band APs number */
6509 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6510 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6511 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6512 /* adaptive dwell number of APs override for p2p friendly GO channels */
6513 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6514 /* adaptive dwell number of APs override for social channels */
6515 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6516
6517 static void
6518 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6519 struct iwx_scan_general_params_v10 *general_params, int bgscan)
6520 {
6521 uint32_t suspend_time, max_out_time;
6522 uint8_t active_dwell, passive_dwell;
6523
6524 active_dwell = IWX_SCAN_DWELL_ACTIVE;
6525 passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6526
6527 general_params->adwell_default_social_chn =
6528 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6529 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6530 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6531
6532 if (bgscan)
6533 general_params->adwell_max_budget =
6534 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6535 else
6536 general_params->adwell_max_budget =
6537 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6538
6539 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6540 if (bgscan) {
6541 max_out_time = htole32(120);
6542 suspend_time = htole32(120);
6543 } else {
6544 max_out_time = htole32(0);
6545 suspend_time = htole32(0);
6546 }
6547 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6548 htole32(max_out_time);
6549 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6550 htole32(suspend_time);
6551 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6552 htole32(max_out_time);
6553 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6554 htole32(suspend_time);
6555
6556 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6557 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6558 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6559 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6560 }
6561
6562 static void
6563 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6564 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6565 {
6566 iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6567
6568 gp->flags = htole16(gen_flags);
6569
6570 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6571 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6572 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6573 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6574
6575 gp->scan_start_mac_id = 0;
6576 }
6577
6578 static void
6579 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6580 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6581 int n_ssid)
6582 {
6583 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6584
6585 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6586 nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6587
6588 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6589 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6590 }
6591
/*
 * Build and submit a version-14 UMAC scan request.  Directed SSIDs from
 * the current net80211 scan state are copied into the probe parameters;
 * bgscan selects background-scan dwell settings and asynchronous
 * command submission.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* Single-shot scan: one iteration, no repeat interval. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/* Copy directed-scan SSIDs and build the matching SSID bitmap. */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6654
6655 static void
6656 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6657 {
6658 char alpha2[3];
6659
6660 snprintf(alpha2, sizeof(alpha2), "%c%c",
6661 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6662
6663 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6664 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6665
6666 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6667 }
6668
6669 uint8_t
6670 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6671 {
6672 int i;
6673 uint8_t rval;
6674
6675 for (i = 0; i < rs->rs_nrates; i++) {
6676 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6677 if (rval == iwx_rates[ridx].rate)
6678 return rs->rs_rates[i];
6679 }
6680
6681 return 0;
6682 }
6683
6684 static int
6685 iwx_rval2ridx(int rval)
6686 {
6687 int ridx;
6688
6689 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6690 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6691 continue;
6692 if (rval == iwx_rates[ridx].rate)
6693 break;
6694 }
6695
6696 return ridx;
6697 }
6698
/*
 * Compute the CCK and OFDM basic-rate bitmaps the firmware should use
 * for control response (ACK/CTS) frames, from the node's negotiated
 * basic rates plus the mandatory lower rates required by 802.11-2007
 * section 9.6 (see the long comments below).
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2 GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6784
6785 static void
6786 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6787 struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6788 {
6789 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6790 struct ieee80211com *ic = &sc->sc_ic;
6791 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6792 struct ieee80211_node *ni = vap->iv_bss;
6793 int cck_ack_rates, ofdm_ack_rates;
6794
6795 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6796 in->in_color));
6797 cmd->action = htole32(action);
6798
6799 if (action == IWX_FW_CTXT_ACTION_REMOVE)
6800 return;
6801
6802 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6803 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6804 else if (ic->ic_opmode == IEEE80211_M_STA)
6805 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6806 else
6807 panic("unsupported operating mode %d", ic->ic_opmode);
6808 cmd->tsf_id = htole32(IWX_TSF_ID_A);
6809
6810 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6811 DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6812 ether_sprintf(cmd->node_addr)));
6813 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6814 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6815 return;
6816 }
6817
6818 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6819 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6820 ether_sprintf(cmd->bssid_addr)));
6821 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6822 cmd->cck_rates = htole32(cck_ack_rates);
6823 cmd->ofdm_rates = htole32(ofdm_ack_rates);
6824
6825 cmd->cck_short_preamble
6826 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6827 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6828 cmd->short_slot
6829 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6830 ? IWX_MAC_FLG_SHORT_SLOT : 0);
6831
6832 struct chanAccParams chp;
6833 ieee80211_wme_vap_getparams(vap, &chp);
6834
6835 for (int i = 0; i < WME_NUM_AC; i++) {
6836 int txf = iwx_ac_to_tx_fifo[i];
6837 cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6838 cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6839 cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6840 cmd->ac[txf].fifos_mask = (1 << txf);
6841 cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6842
6843 cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6844 }
6845
6846 if (ni->ni_flags & IEEE80211_NODE_QOS) {
6847 DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6848 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6849 }
6850
6851 if (ni->ni_flags & IEEE80211_NODE_HT) {
6852 switch (vap->iv_curhtprotmode) {
6853 case IEEE80211_HTINFO_OPMODE_PURE:
6854 break;
6855 case IEEE80211_HTINFO_OPMODE_PROTOPT:
6856 case IEEE80211_HTINFO_OPMODE_MIXED:
6857 cmd->protection_flags |=
6858 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6859 IWX_MAC_PROT_FLG_FAT_PROT);
6860 break;
6861 case IEEE80211_HTINFO_OPMODE_HT20PR:
6862 if (in->in_phyctxt &&
6863 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6864 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6865 cmd->protection_flags |=
6866 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6867 IWX_MAC_PROT_FLG_FAT_PROT);
6868 }
6869 break;
6870 default:
6871 break;
6872 }
6873 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6874 DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6875 }
6876
6877 if (ic->ic_flags & IEEE80211_F_USEPROT)
6878 cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6879 cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6880 #undef IWX_EXP2
6881 }
6882
6883 static void
6884 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6885 struct iwx_mac_data_sta *sta, int assoc)
6886 {
6887 struct ieee80211_node *ni = &in->in_ni;
6888 struct ieee80211com *ic = &sc->sc_ic;
6889 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6890 uint32_t dtim_off;
6891 uint64_t tsf;
6892 int dtim_period;
6893
6894 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6895 tsf = le64toh(ni->ni_tstamp.tsf);
6896 dtim_period = vap->iv_dtim_period;
6897
6898 sta->is_assoc = htole32(assoc);
6899
6900 if (assoc) {
6901 sta->dtim_time = htole32(tsf + dtim_off);
6902 sta->dtim_tsf = htole64(tsf + dtim_off);
6903 // XXX: unset in iwm
6904 sta->assoc_beacon_arrive_time = 0;
6905 }
6906 sta->bi = htole32(ni->ni_intval);
6907 sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6908 sta->data_policy = htole32(0);
6909 sta->listen_interval = htole32(10);
6910 sta->assoc_id = htole32(ni->ni_associd);
6911 }
6912
6913 static int
6914 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6915 int assoc)
6916 {
6917 struct ieee80211com *ic = &sc->sc_ic;
6918 struct ieee80211_node *ni = &in->in_ni;
6919 struct iwx_mac_ctx_cmd cmd;
6920 int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6921
6922 if (action == IWX_FW_CTXT_ACTION_ADD && active)
6923 panic("MAC already added");
6924 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6925 panic("MAC already removed");
6926
6927 memset(&cmd, 0, sizeof(cmd));
6928
6929 iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6930
6931 if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6932 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6933 sizeof(cmd), &cmd);
6934 }
6935
6936 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6937 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6938 IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6939 IWX_MAC_FILTER_ACCEPT_GRP |
6940 IWX_MAC_FILTER_IN_BEACON |
6941 IWX_MAC_FILTER_IN_PROBE_REQUEST |
6942 IWX_MAC_FILTER_IN_CRC32);
6943 // XXX: dtim period is in vap
6944 } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
6945 /*
6946 * Allow beacons to pass through as long as we are not
6947 * associated or we do not have dtim period information.
6948 */
6949 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6950 }
6951 iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6952 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6953 }
6954
6955 static int
6956 iwx_clear_statistics(struct iwx_softc *sc)
6957 {
6958 struct iwx_statistics_cmd scmd = {
6959 .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6960 };
6961 struct iwx_host_cmd cmd = {
6962 .id = IWX_STATISTICS_CMD,
6963 .len[0] = sizeof(scmd),
6964 .data[0] = &scmd,
6965 .flags = IWX_CMD_WANT_RESP,
6966 .resp_pkt_len = sizeof(struct iwx_notif_statistics),
6967 };
6968 int err;
6969
6970 err = iwx_send_cmd(sc, &cmd);
6971 if (err)
6972 return err;
6973
6974 iwx_free_resp(sc, &cmd);
6975 return 0;
6976 }
6977
/* Kick off a foreground (non-background) UMAC scan. */
static int
iwx_scan(struct iwx_softc *sc)
{
	int error;

	error = iwx_umac_scan_v14(sc, 0);
	if (error != 0)
		printf("%s: could not initiate scan\n", DEVNAME(sc));
	return error;
}
6990
6991 static int
6992 iwx_bgscan(struct ieee80211com *ic)
6993 {
6994 struct iwx_softc *sc = ic->ic_softc;
6995 int err;
6996
6997 err = iwx_umac_scan_v14(sc, 1);
6998 if (err) {
6999 printf("%s: could not initiate scan\n", DEVNAME(sc));
7000 return err;
7001 }
7002 return 0;
7003 }
7004
7005 static int
7006 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7007 {
7008 int err;
7009
7010 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7011
7012 /*
7013 * Non-QoS frames use the "MGMT" TID and queue.
7014 * Other TIDs and data queues are reserved for QoS data frames.
7015 */
7016 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7017 IWX_MGMT_TID, IWX_TX_RING_COUNT);
7018 if (err) {
7019 printf("%s: could not enable Tx queue %d (error %d)\n",
7020 DEVNAME(sc), sc->first_data_qid, err);
7021 return err;
7022 }
7023
7024 return 0;
7025 }
7026
7027 static int
7028 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7029 {
7030 int err, cmd_ver;
7031
7032 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7033 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7034 IWX_SCD_QUEUE_CONFIG_CMD);
7035 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7036 return 0;
7037
7038 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7039
7040 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7041 IWX_MGMT_TID);
7042 if (err) {
7043 printf("%s: could not disable Tx queue %d (error %d)\n",
7044 DEVNAME(sc), sc->first_data_qid, err);
7045 return err;
7046 }
7047
7048 return 0;
7049 }
7050
7051 static int
7052 iwx_rs_rval2idx(uint8_t rval)
7053 {
7054 /* Firmware expects indices which match our 11g rate set. */
7055 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7056 int i;
7057
7058 for (i = 0; i < rs->rs_nrates; i++) {
7059 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7060 return i;
7061 }
7062
7063 return -1;
7064 }
7065
7066 static uint16_t
7067 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7068 {
7069 uint16_t htrates = 0;
7070 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7071 int i;
7072
7073 if (rsidx == IEEE80211_HT_RATESET_SISO) {
7074 for (i = 0; i < htrs->rs_nrates; i++) {
7075 if (htrs->rs_rates[i] <= 7)
7076 htrates |= (1 << htrs->rs_rates[i]);
7077 }
7078 } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7079 for (i = 0; i < htrs->rs_nrates; i++) {
7080 if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7081 htrates |= (1 << (htrs->rs_rates[i] - 8));
7082 }
7083 } else
7084 panic(("iwx_rs_ht_rates"));
7085
7086 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7087 "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7088
7089 return htrates;
7090 }
7091
7092 uint16_t
7093 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7094 {
7095 uint16_t rx_mcs;
7096 int max_mcs = -1;
7097 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
7098 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
7099 rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7100 IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7101 IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7102
7103 switch (rx_mcs) {
7104 case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7105 break;
7106 case IEEE80211_VHT_MCS_SUPPORT_0_7:
7107 max_mcs = 7;
7108 break;
7109 case IEEE80211_VHT_MCS_SUPPORT_0_8:
7110 max_mcs = 8;
7111 break;
7112 case IEEE80211_VHT_MCS_SUPPORT_0_9:
7113 /* Disable VHT MCS 9 for 20MHz-only stations. */
7114 if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7115 max_mcs = 8;
7116 else
7117 max_mcs = 9;
7118 break;
7119 default:
7120 /* Should not happen; Values above cover the possible range. */
7121 panic("invalid VHT Rx MCS value %u", rx_mcs);
7122 }
7123
7124 return ((1 << (max_mcs + 1)) - 1);
7125 }
7126
/*
 * Configure firmware rate scaling using version 3 of the TLC config
 * command.  This path is currently compiled out behind "#if 1" and
 * panics if reached: it has not been tested on this port.  The disabled
 * code mirrors iwx_rs_init_v4() but uses the v3 command structure and
 * derives channel width from the PHY context rather than the channel.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Map each negotiated legacy rate to a firmware rate index. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the highest negotiated mode: VHT > HT > legacy. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Channel width from the PHY context (OpenBSD-style fields). */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Max A-MSDU length: 3895 bytes for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7198
/*
 * Configure firmware rate scaling (TLC) for our station using version 4
 * of the TLC_MNG_CONFIG command.  Builds bitmaps of negotiated legacy,
 * HT and VHT rates plus channel-width and short-GI capabilities and
 * submits them asynchronously.
 *
 * Returns EINVAL if a negotiated legacy rate does not map onto the
 * standard 11g rate set, otherwise the command submission status.
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Map each negotiated legacy rate to a firmware rate index. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	/* Debug dump of the negotiated HT rate set. */
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the highest negotiated mode: VHT > HT > legacy. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	/* OpenBSD derives the width from the PHY context instead. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Derive maximum channel width from the node's channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Max A-MSDU length: 3895 bytes for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7297
7298 static int
7299 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7300 {
7301 int cmd_ver;
7302
7303 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7304 IWX_TLC_MNG_CONFIG_CMD);
7305 if (cmd_ver == 4)
7306 return iwx_rs_init_v4(sc, in);
7307 else
7308 return iwx_rs_init_v3(sc, in);
7309 }
7310
/*
 * Handle a TLC_MNG_UPDATE_NOTIF notification: the firmware's rate
 * scaling algorithm has picked a new Tx rate.  Decode the rate_n_flags
 * value (layout depends on the notification version) and record it in
 * the net80211 node so the current Tx rate is visible to the stack.
 */
static void
iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = (void *)vap->iv_bss;

	struct ieee80211_rateset *rs = &ni->ni_rates;
	uint32_t rate_n_flags;
	uint8_t plcp, rval;
	int i, cmd_ver, rate_n_flags_ver2 = 0;

	/* Only rate updates for our single station are of interest. */
	if (notif->sta_id != IWX_STATION_ID ||
	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
		return;

	rate_n_flags = le32toh(notif->rate);

	if (sc->sc_debug & IWX_DEBUG_TXRATE)
		print_ratenflags(__func__, __LINE__,
		    rate_n_flags, sc->sc_rate_n_flags_version);

	/* Notification version >= 3 uses the new rate_n_flags layout. */
	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_TLC_MNG_UPDATE_NOTIF);
	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
		rate_n_flags_ver2 = 1;

	/* HT (MCS) rates: record the MCS index and we are done. */
	if (rate_n_flags_ver2) {
		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
		if (mod_type == IWX_RATE_MCS_HT_MSK) {

			ieee80211_node_set_txrate_dot11rate(ni,
			    IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
			    IEEE80211_RATE_MCS);
			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
			    rate_n_flags);
			return;
		}
	} else {
		if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
			ieee80211_node_set_txrate_dot11rate(ni,
			    rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));

			IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
			    "%s:%d new MCS idx: %d rate_n_flags: %x\n",
			    __func__, __LINE__,
			    ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
			return;
		}
	}

	/* Legacy rate: recover the rate value from rate_n_flags. */
	if (rate_n_flags_ver2) {
		/* NOTE(review): this 'rs' shadows the function-scope 'rs'. */
		const struct ieee80211_rateset *rs;
		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
			rs = &ieee80211_std_rateset_11a;
		else
			rs = &ieee80211_std_rateset_11b;
		if (ridx < rs->rs_nrates)
			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
		else
			rval = 0;	/* out of range; ignore below */
	} else {
		/* Old layout carries a PLCP value; map it to a rate. */
		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);

		rval = 0;
		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
			if (iwx_rates[i].plcp == plcp) {
				rval = iwx_rates[i].rate;
				break;
			}
		}
	}

	/* Find the rate's index in the node's rate set and record it. */
	if (rval) {
		uint8_t rv;
		for (i = 0; i < rs->rs_nrates; i++) {
			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
			if (rv == rval) {
				ieee80211_node_set_txrate_dot11rate(ni, i);
				break;
			}
		}
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
		    "%s:%d new rate %d\n", __func__, __LINE__,
		    ieee80211_node_get_txrate_dot11rate(ni));
	}
}
7403
7404 static int
7405 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7406 uint8_t chains_static, uint8_t chains_dynamic)
7407 {
7408 struct iwx_rlc_config_cmd cmd;
7409 uint32_t cmd_id;
7410 uint8_t active_cnt, idle_cnt;
7411
7412 memset(&cmd, 0, sizeof(cmd));
7413
7414 idle_cnt = chains_static;
7415 active_cnt = chains_dynamic;
7416
7417 cmd.phy_id = htole32(phyctxt->id);
7418 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7419 IWX_PHY_RX_CHAIN_VALID_POS);
7420 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7421 cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7422 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7423
7424 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7425 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7426 }
7427
/*
 * Re-point a PHY context at a new channel/width configuration.
 *
 * With CDB-capable firmware a PHY context cannot be moved across bands
 * by a MODIFY: it must be removed and re-added.  Otherwise a single
 * MODIFY suffices.  The REMOVE-then-ADD command ordering matters; do
 * not reorder these firmware commands.
 *
 * Returns 0 on success or an error from the firmware command path.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	/* Guard against callers passing the "any channel" sentinel. */
	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		/* Band change with CDB firmware: remove, then re-add. */
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		/* Same band (or no CDB): a MODIFY is sufficient. */
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Cache the new secondary-channel offset and VHT width. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/* Newer firmware wants Rx chain info via a separate RLC command. */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7492
/*
 * Bring up firmware state for authentication: configure a PHY context
 * for the BSS channel, then add the MAC context, binding, and station,
 * enable the management Tx queue, and finally schedule session
 * protection so the firmware stays on-channel during association.
 *
 * On failure, previously established firmware state is torn down in
 * reverse order via the goto ladder, but only if no device reset
 * happened in the meantime (sc_generation check).
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * NOTE(review): the reference taken here does not appear to be
	 * released on the early-return or error paths below — confirm
	 * against net80211 node refcounting rules (ieee80211_free_node).
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	/* Point PHY context 0 at the BSS (or monitored) channel. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode needs only the injection queue; we are done. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

	/* Unwind in reverse order, unless the device was reset meanwhile. */
rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7597
7598 static int
7599 iwx_deauth(struct iwx_softc *sc)
7600 {
7601 struct ieee80211com *ic = &sc->sc_ic;
7602 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7603 struct iwx_node *in = IWX_NODE(vap->iv_bss);
7604 int err;
7605
7606 IWX_ASSERT_LOCKED(sc);
7607
7608 iwx_unprotect_session(sc, in);
7609
7610 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
7611 err = iwx_rm_sta(sc, in);
7612 if (err)
7613 return err;
7614 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7615 }
7616
7617 if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
7618 err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7619 if (err) {
7620 printf("%s: could not remove binding (error %d)\n",
7621 DEVNAME(sc), err);
7622 return err;
7623 }
7624 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7625 }
7626
7627 DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
7628 IWX_FLAG_MAC_ACTIVE));
7629 if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
7630 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7631 if (err) {
7632 printf("%s: could not remove MAC context (error %d)\n",
7633 DEVNAME(sc), err);
7634 return err;
7635 }
7636 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7637 }
7638
7639 /* Move unused PHY context to a default channel. */
7640 //TODO uncommented in obsd, but stays on the way of auth->auth
7641 err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7642 &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7643 IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7644 if (err)
7645 return err;
7646
7647 return 0;
7648 }
7649
/*
 * Transition to RUN state after association: widen the PHY context for
 * HT/VHT if negotiated, update the station and MAC contexts with the
 * new association state, and configure smart-FIFO, multicast, power
 * management and rate scaling.
 *
 * Returns 0 on success, or the first error encountered (no unwinding
 * is performed here).
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Use both chains only if MIMO is enabled. */
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode does not transmit; no rate scaling needed. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7746
/*
 * Undo firmware state set up for RUN state: flush the Tx path, tear
 * down Rx block ack sessions, restore smart-fifo and beacon-filter
 * defaults, and mark the station as disassociated in the MAC context.
 * Called with the driver lock held. Returns 0 or an errno value.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		/* Last four zero args: stop (not start) this BA session. */
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	/* Return smart-fifo to its unassociated configuration. */
	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7802
7803 static struct ieee80211_node *
7804 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7805 {
7806 return malloc(sizeof (struct iwx_node), M_80211_NODE,
7807 M_NOWAIT | M_ZERO);
7808 }
7809
7810 #if 0
/*
 * Disabled (#if 0): OpenBSD-style hardware crypto key installation,
 * not yet ported to FreeBSD's net80211 key API — TODO confirm before
 * enabling. Queues the key for the setkey task and returns EBUSY so
 * the stack retries once the firmware command has completed.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	/* setkey_arg is a fixed-size ring; refuse when it is full. */
	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	/* EBUSY tells the caller the key install is in progress. */
	return EBUSY;
}
7840
/*
 * Disabled (#if 0): install a CCMP key into firmware via ADD_STA_KEY.
 * Marks the 802.1X port valid once both pairwise and group keys are
 * installed. Uses OpenBSD net80211 APIs — TODO port before enabling.
 */
int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys go in firmware key slot 1, pairwise in slot 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		/* Key install failed; deauth and fall back to scanning. */
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* With both keys installed, open the 802.1X port. */
	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}
7905
/*
 * Disabled (#if 0): task-context consumer for the setkey_arg ring
 * filled by iwx_set_key(). Drains queued keys in FIFO order; stops
 * on the first error or when the driver is shutting down.
 */
void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		/* Clear the slot so stale pointers cannot be reused. */
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
7929
/*
 * Disabled (#if 0): remove a CCMP key from firmware by sending an
 * ADD_STA_KEY command with the IWX_STA_KEY_NOT_VALID flag set.
 * Sent asynchronously; the result is not checked.
 */
void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	/* Firmware station is gone; nothing to delete. */
	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	/* Same slot assignment as iwx_add_sta_key: group=1, pairwise=0. */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
7961 #endif
7962
/*
 * Perform the firmware side of a net80211 state transition with the
 * driver lock held. Downward (or same-level) transitions first tear
 * down firmware state in reverse order (RUN -> AUTH) via fallthrough,
 * then the upward switch establishes the target state.
 * Returns 0 on success or an errno value.
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	/* Tear down on any non-forward transition. */
	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/* Build up firmware state for the target. SCAN/ASSOC need no work. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8028
/*
 * net80211 state-change hook. Runs the firmware transition outside
 * the IEEE80211 lock (iwx_newstate_sub may sleep on commands), then
 * chains to net80211's original newstate handler on success.
 */
static int
iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_state ostate = vap->iv_state;
	int err;

	/*
	 * Prevent attempts to transition towards the same state, unless
	 * we are scanning in which case a SCAN -> SCAN transition
	 * triggers another scan iteration. And AUTH -> AUTH is needed
	 * to support band-steering.
	 */
	if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
	    nstate != IEEE80211_S_AUTH)
		return 0;
	/* Drop the com lock: the sub-handler takes the driver lock. */
	IEEE80211_UNLOCK(ic);
	err = iwx_newstate_sub(vap, nstate);
	IEEE80211_LOCK(ic);
	if (err == 0)
		err = ivp->iv_newstate(vap, nstate, arg);

	return (err);
}
8054
8055 static void
8056 iwx_endscan(struct iwx_softc *sc)
8057 {
8058 struct ieee80211com *ic = &sc->sc_ic;
8059 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8060
8061 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8062 return;
8063
8064 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8065
8066 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8067 wakeup(&vap->iv_state); /* wake up iwx_newstate */
8068 }
8069
8070 /*
8071 * Aging and idle timeouts for the different possible scenarios
8072 * in default configuration
8073 */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast: { aging, idle } */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast: { aging, idle } */
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack: { aging, idle } */
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-attempt: { aging, idle } */
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
8097
8098 /*
8099 * Aging and idle timeouts for the different possible scenarios
8100 * in single BSS MAC configuration.
8101 */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast: { aging, idle } */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast: { aging, idle } */
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack: { aging, idle } */
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-attempt: { aging, idle } */
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};
8125
8126 static void
8127 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8128 struct ieee80211_node *ni)
8129 {
8130 int i, j, watermark;
8131
8132 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8133
8134 /*
8135 * If we are in association flow - check antenna configuration
8136 * capabilities of the AP station, and choose the watermark accordingly.
8137 */
8138 if (ni) {
8139 if (ni->ni_flags & IEEE80211_NODE_HT) {
8140 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8141 int hasmimo = 0;
8142 for (i = 0; i < htrs->rs_nrates; i++) {
8143 if (htrs->rs_rates[i] > 7) {
8144 hasmimo = 1;
8145 break;
8146 }
8147 }
8148 if (hasmimo)
8149 watermark = IWX_SF_W_MARK_MIMO2;
8150 else
8151 watermark = IWX_SF_W_MARK_SISO;
8152 } else {
8153 watermark = IWX_SF_W_MARK_LEGACY;
8154 }
8155 /* default watermark value for unassociated mode. */
8156 } else {
8157 watermark = IWX_SF_W_MARK_MIMO2;
8158 }
8159 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8160
8161 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8162 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8163 sf_cmd->long_delay_timeouts[i][j] =
8164 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8165 }
8166 }
8167
8168 if (ni) {
8169 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8170 sizeof(iwx_sf_full_timeout));
8171 } else {
8172 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8173 sizeof(iwx_sf_full_timeout_def));
8174 }
8175
8176 }
8177
8178 static int
8179 iwx_sf_config(struct iwx_softc *sc, int new_state)
8180 {
8181 struct ieee80211com *ic = &sc->sc_ic;
8182 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8183 struct ieee80211_node *ni = vap->iv_bss;
8184 struct iwx_sf_cfg_cmd sf_cmd = {
8185 .state = htole32(new_state),
8186 };
8187 int err = 0;
8188
8189 switch (new_state) {
8190 case IWX_SF_UNINIT:
8191 case IWX_SF_INIT_OFF:
8192 iwx_fill_sf_command(sc, &sf_cmd, NULL);
8193 break;
8194 case IWX_SF_FULL_ON:
8195 iwx_fill_sf_command(sc, &sf_cmd, ni);
8196 break;
8197 default:
8198 return EINVAL;
8199 }
8200
8201 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8202 sizeof(sf_cmd), &sf_cmd);
8203 return err;
8204 }
8205
8206 static int
8207 iwx_send_bt_init_conf(struct iwx_softc *sc)
8208 {
8209 struct iwx_bt_coex_cmd bt_cmd;
8210
8211 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8212
8213 bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8214 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8215 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8216
8217
8218 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8219 &bt_cmd);
8220 }
8221
8222 static int
8223 iwx_send_soc_conf(struct iwx_softc *sc)
8224 {
8225 struct iwx_soc_configuration_cmd cmd;
8226 int err;
8227 uint32_t cmd_id, flags = 0;
8228
8229 memset(&cmd, 0, sizeof(cmd));
8230
8231 /*
8232 * In VER_1 of this command, the discrete value is considered
8233 * an integer; In VER_2, it's a bitmask. Since we have only 2
8234 * values in VER_1, this is backwards-compatible with VER_2,
8235 * as long as we don't set any other flag bits.
8236 */
8237 if (!sc->sc_integrated) { /* VER_1 */
8238 flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8239 } else { /* VER_2 */
8240 uint8_t scan_cmd_ver;
8241 if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8242 flags |= (sc->sc_ltr_delay &
8243 IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8244 scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8245 IWX_SCAN_REQ_UMAC);
8246 if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8247 scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8248 flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8249 }
8250 cmd.flags = htole32(flags);
8251
8252 cmd.latency = htole32(sc->sc_xtal_latency);
8253
8254 cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8255 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8256 if (err)
8257 printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8258 return err;
8259 }
8260
/*
 * Send an MCC (mobile country code) update for location-aware
 * regulatory (LAR). The synchronous response is validated for size
 * against its variable-length channel list before being logged.
 * Returns 0 on success, EIO on a malformed response, or the command
 * submission error.
 */
static int
iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
{
	struct iwx_mcc_update_cmd mcc_cmd;
	struct iwx_host_cmd hcmd = {
		.id = IWX_MCC_UPDATE_CMD,
		.flags = IWX_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	struct iwx_rx_packet *pkt;
	struct iwx_mcc_update_resp *resp;
	size_t resp_len;
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code is packed as two big-endian ASCII bytes. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;

	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Response must at least hold the fixed-size header... */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len < sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	/* ...and exactly match the advertised channel count. */
	resp = (void *)pkt->data;
	if (resp_len != sizeof(*resp) +
	    resp->n_channels * sizeof(resp->channels[0])) {
		err = EIO;
		goto out;
	}

	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));

out:
	/* Always release the response buffer allocated by iwx_send_cmd. */
	iwx_free_resp(sc, &hcmd);

	return err;
}
8317
8318 static int
8319 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8320 {
8321 struct iwx_temp_report_ths_cmd cmd;
8322 int err;
8323
8324 /*
8325 * In order to give responsibility for critical-temperature-kill
8326 * and TX backoff to FW we need to send an empty temperature
8327 * reporting command at init time.
8328 */
8329 memset(&cmd, 0, sizeof(cmd));
8330
8331 err = iwx_send_cmd_pdu(sc,
8332 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8333 0, sizeof(cmd), &cmd);
8334 if (err)
8335 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8336 DEVNAME(sc), err);
8337
8338 return err;
8339 }
8340
8341 static int
8342 iwx_init_hw(struct iwx_softc *sc)
8343 {
8344 struct ieee80211com *ic = &sc->sc_ic;
8345 int err = 0, i;
8346
8347 err = iwx_run_init_mvm_ucode(sc, 0);
8348 if (err)
8349 return err;
8350
8351 if (!iwx_nic_lock(sc))
8352 return EBUSY;
8353
8354 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8355 if (err) {
8356 printf("%s: could not init tx ant config (error %d)\n",
8357 DEVNAME(sc), err);
8358 goto err;
8359 }
8360
8361 if (sc->sc_tx_with_siso_diversity) {
8362 err = iwx_send_phy_cfg_cmd(sc);
8363 if (err) {
8364 printf("%s: could not send phy config (error %d)\n",
8365 DEVNAME(sc), err);
8366 goto err;
8367 }
8368 }
8369
8370 err = iwx_send_bt_init_conf(sc);
8371 if (err) {
8372 printf("%s: could not init bt coex (error %d)\n",
8373 DEVNAME(sc), err);
8374 return err;
8375 }
8376
8377 err = iwx_send_soc_conf(sc);
8378 if (err) {
8379 printf("%s: iwx_send_soc_conf failed\n", __func__);
8380 return err;
8381 }
8382
8383 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8384 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8385 err = iwx_send_dqa_cmd(sc);
8386 if (err) {
8387 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8388 "failed (error %d)\n", __func__, err);
8389 return err;
8390 }
8391 }
8392 // TODO phyctxt
8393 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8394 /*
8395 * The channel used here isn't relevant as it's
8396 * going to be overwritten in the other flows.
8397 * For now use the first channel we have.
8398 */
8399 sc->sc_phyctxt[i].id = i;
8400 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8401 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8402 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8403 if (err) {
8404 printf("%s: could not add phy context %d (error %d)\n",
8405 DEVNAME(sc), i, err);
8406 goto err;
8407 }
8408 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8409 IWX_RLC_CONFIG_CMD) == 2) {
8410 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8411 if (err) {
8412 printf("%s: could not configure RLC for PHY "
8413 "%d (error %d)\n", DEVNAME(sc), i, err);
8414 goto err;
8415 }
8416 }
8417 }
8418
8419 err = iwx_config_ltr(sc);
8420 if (err) {
8421 printf("%s: PCIe LTR configuration failed (error %d)\n",
8422 DEVNAME(sc), err);
8423 }
8424
8425 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8426 err = iwx_send_temp_report_ths_cmd(sc);
8427 if (err) {
8428 printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8429 __func__);
8430 goto err;
8431 }
8432 }
8433
8434 err = iwx_power_update_device(sc);
8435 if (err) {
8436 printf("%s: could not send power command (error %d)\n",
8437 DEVNAME(sc), err);
8438 goto err;
8439 }
8440
8441 if (sc->sc_nvm.lar_enabled) {
8442 err = iwx_send_update_mcc_cmd(sc, "ZZ");
8443 if (err) {
8444 printf("%s: could not init LAR (error %d)\n",
8445 DEVNAME(sc), err);
8446 goto err;
8447 }
8448 }
8449
8450 err = iwx_config_umac_scan_reduced(sc);
8451 if (err) {
8452 printf("%s: could not configure scan (error %d)\n",
8453 DEVNAME(sc), err);
8454 goto err;
8455 }
8456
8457 err = iwx_disable_beacon_filter(sc);
8458 if (err) {
8459 printf("%s: could not disable beacon filter (error %d)\n",
8460 DEVNAME(sc), err);
8461 goto err;
8462 }
8463
8464 err:
8465 iwx_nic_unlock(sc);
8466 return err;
8467 }
8468
8469 /* Allow multicast from our BSSID. */
8470 static int
8471 iwx_allow_mcast(struct iwx_softc *sc)
8472 {
8473 struct ieee80211com *ic = &sc->sc_ic;
8474 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8475 struct iwx_node *in = IWX_NODE(vap->iv_bss);
8476 struct iwx_mcast_filter_cmd *cmd;
8477 size_t size;
8478 int err;
8479
8480 size = roundup(sizeof(*cmd), 4);
8481 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8482 if (cmd == NULL)
8483 return ENOMEM;
8484 cmd->filter_own = 1;
8485 cmd->port_id = 0;
8486 cmd->count = 0;
8487 cmd->pass_all = 1;
8488 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8489
8490 err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8491 0, size, cmd);
8492 free(cmd, M_DEVBUF);
8493 return err;
8494 }
8495
8496 static int
8497 iwx_init(struct iwx_softc *sc)
8498 {
8499 int err, generation;
8500 generation = ++sc->sc_generation;
8501 iwx_preinit(sc);
8502
8503 err = iwx_start_hw(sc);
8504 if (err) {
8505 printf("%s: iwx_start_hw failed\n", __func__);
8506 return err;
8507 }
8508
8509 err = iwx_init_hw(sc);
8510 if (err) {
8511 if (generation == sc->sc_generation)
8512 iwx_stop_device(sc);
8513 printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
8514 return err;
8515 }
8516
8517 sc->sc_flags |= IWX_FLAG_HW_INITED;
8518 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8519
8520 return 0;
8521 }
8522
8523 static void
8524 iwx_start(struct iwx_softc *sc)
8525 {
8526 struct ieee80211_node *ni;
8527 struct mbuf *m;
8528
8529 while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8530 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8531 if (iwx_tx(sc, m, ni) != 0) {
8532 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8533 continue;
8534 }
8535 }
8536 }
8537
8538 static void
8539 iwx_stop(struct iwx_softc *sc)
8540 {
8541 struct ieee80211com *ic = &sc->sc_ic;
8542 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8543 struct iwx_vap *ivp = IWX_VAP(vap);
8544
8545 iwx_stop_device(sc);
8546
8547 /* Reset soft state. */
8548 sc->sc_generation++;
8549 ivp->phy_ctxt = NULL;
8550
8551 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8552 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8553 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8554 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8555 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8556 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8557 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8558 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8559
8560 sc->sc_rx_ba_sessions = 0;
8561 sc->ba_rx.start_tidmask = 0;
8562 sc->ba_rx.stop_tidmask = 0;
8563 memset(sc->aggqid, 0, sizeof(sc->aggqid));
8564 sc->ba_tx.start_tidmask = 0;
8565 sc->ba_tx.stop_tidmask = 0;
8566 }
8567
8568 static void
8569 iwx_watchdog(void *arg)
8570 {
8571 struct iwx_softc *sc = arg;
8572 struct ieee80211com *ic = &sc->sc_ic;
8573 int i;
8574
8575 /*
8576 * We maintain a separate timer for each Tx queue because
8577 * Tx aggregation queues can get "stuck" while other queues
8578 * keep working. The Linux driver uses a similar workaround.
8579 */
8580 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8581 if (sc->sc_tx_timer[i] > 0) {
8582 if (--sc->sc_tx_timer[i] == 0) {
8583 printf("%s: device timeout\n", DEVNAME(sc));
8584
8585 iwx_nic_error(sc);
8586 iwx_dump_driver_status(sc);
8587 ieee80211_restart_all(ic);
8588 return;
8589 }
8590 }
8591 }
8592 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8593 }
8594
8595 /*
8596 * Note: This structure is read from the device with IO accesses,
8597 * and the reading already does the endian conversion. As it is
8598 * read with uint32_t-sized accesses, any members with a different size
8599 * need to be ordered correctly though!
8600 */
struct iwx_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* build date and time of the firmware
				 * image */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8648
8649 /*
8650 * UMAC error struct - relevant starting from family 8000 chip.
8651 * Note: This structure is read from the device with IO accesses,
8652 * and the reading already does the endian conversion. As it is
8653 * read with u32-sized accesses, any members with a different size
8654 * need to be ordered correctly though!
8655 */
struct iwx_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC firmware version major */
	uint32_t umac_minor;	/* UMAC firmware version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
8673
8674 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
8675 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
8676
/*
 * Read the UMAC error event table from device memory and dump it to
 * the console. The table address was reported by firmware at load
 * time (sc_uc.uc_umac_error_event_table).
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Error tables live in SRAM above 0x400000; anything else is bogus. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem counts in 32-bit words, not bytes. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8721
/* High nibble of a firmware assert code identifies the CPU; mask it off. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Map firmware assert codes to human-readable names. The final
 * entry ("ADVANCED_SYSASSERT", num 0) is the catch-all returned by
 * iwx_desc_lookup() when no code matches.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },	/* two codes share this name */
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8748
8749 static const char *
8750 iwx_desc_lookup(uint32_t num)
8751 {
8752 int i;
8753
8754 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8755 if (advanced_lookup[i].num ==
8756 (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8757 return advanced_lookup[i].name;
8758
8759 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8760 return advanced_lookup[i].name;
8761 }
8762
8763 /*
8764 * Support for dumping the error log seemed like a good idea ...
8765 * but it's mostly hex junk and the only sensible thing is the
8766 * hw/ucode revision (which we know anyway). Since it's here,
8767 * I'll just leave it in, just in case e.g. the Intel guys want to
8768 * help us decipher some "ADVANCED_SYSASSERT" later.
8769 */
8770 static void
8771 iwx_nic_error(struct iwx_softc *sc)
8772 {
8773 struct iwx_error_event_table table;
8774 uint32_t base;
8775
8776 printf("%s: dumping device error log\n", DEVNAME(sc));
8777 printf("%s: GOS-3758: 1\n", __func__);
8778 base = sc->sc_uc.uc_lmac_error_event_table[0];
8779 printf("%s: GOS-3758: 2\n", __func__);
8780 if (base < 0x400000) {
8781 printf("%s: Invalid error log pointer 0x%08x\n",
8782 DEVNAME(sc), base);
8783 return;
8784 }
8785
8786 printf("%s: GOS-3758: 3\n", __func__);
8787 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8788 printf("%s: reading errlog failed\n", DEVNAME(sc));
8789 return;
8790 }
8791
8792 printf("%s: GOS-3758: 4\n", __func__);
8793 if (!table.valid) {
8794 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8795 return;
8796 }
8797
8798 printf("%s: GOS-3758: 5\n", __func__);
8799 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8800 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8801 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8802 sc->sc_flags, table.valid);
8803 }
8804
8805 printf("%s: GOS-3758: 6\n", __func__);
8806 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8807 iwx_desc_lookup(table.error_id));
8808 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8809 table.trm_hw_status0);
8810 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8811 table.trm_hw_status1);
8812 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8813 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8814 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8815 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8816 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8817 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8818 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8819 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8820 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8821 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8822 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8823 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8824 table.fw_rev_type);
8825 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8826 table.major);
8827 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8828 table.minor);
8829 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8830 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8831 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8832 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8833 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8834 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8835 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8836 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8837 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8838 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8839 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8840 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8841 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8842 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8843 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8844 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8845 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8846
8847 if (sc->sc_uc.uc_umac_error_event_table)
8848 iwx_nic_umac_error(sc);
8849 }
8850
8851 static void
8852 iwx_dump_driver_status(struct iwx_softc *sc)
8853 {
8854 struct ieee80211com *ic = &sc->sc_ic;
8855 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8856 enum ieee80211_state state = vap->iv_state;
8857 int i;
8858
8859 printf("driver status:\n");
8860 for (i = 0; i < nitems(sc->txq); i++) {
8861 struct iwx_tx_ring *ring = &sc->txq[i];
8862 printf(" tx ring %2d: qid=%-2d cur=%-3d "
8863 "cur_hw=%-3d queued=%-3d\n",
8864 i, ring->qid, ring->cur, ring->cur_hw,
8865 ring->queued);
8866 }
8867 printf(" rx ring: cur=%d\n", sc->rxq.cur);
8868 printf(" 802.11 state %s\n", ieee80211_state_name[state]);
8869 }
8870
/*
 * Sync the current rx buffer for CPU reads and point _var_ at the
 * payload immediately following the packet header of _pkt_.
 * NOTE: relies on 'sc' and 'data' being in scope at the expansion site
 * (as they are throughout iwx_rx_pkt() below).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_) \
do { \
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1); \
} while (/*CONSTCOND*/0)
8876
8877 static int
8878 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8879 {
8880 int qid, idx, code;
8881
8882 qid = pkt->hdr.qid & ~0x80;
8883 idx = pkt->hdr.idx;
8884 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8885
8886 return (!(qid == 0 && idx == 0 && code == 0) &&
8887 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8888 }
8889
/*
 * Process all firmware packets contained in one rx buffer.
 *
 * A single rx buffer of IWX_RBUF_SIZE bytes may hold several
 * concatenated packets (responses to driver commands as well as
 * firmware-initiated notifications); walk them front to back and
 * dispatch each one.  MPDU (received-frame) packets are handed to
 * net80211; most others update driver/firmware state or are ignored.
 *
 * 'ml' is unused in this port; it only appears in the commented-out
 * IWX_BAR_FRAME_RELEASE path below (OpenBSD mbuf-list heritage).
 */
static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	/* Minimum space a valid packet needs: length word + header. */
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	m0 = data->m;
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;
		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		if (!iwx_rx_pkt_valid(pkt))
			break;

		/*
		 * XXX Intel inside (tm)
		 * Any commands in the LONG_GROUP could actually be in the
		 * LEGACY group. Firmware API versions >= 50 reject commands
		 * in group 0, forcing us to use this hack.
		 */
		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
			/*
			 * NOTE(review): qid is used unmasked here; bit 0x80
			 * marks firmware-initiated notifications (see the
			 * comment above the iwx_cmd_done() call below).
			 * Verify a LONG_GROUP packet can never carry 0x80,
			 * otherwise this indexes past sc->txq.
			 */
			struct iwx_tx_ring *ring = &sc->txq[qid];
			struct iwx_tx_data *txdata = &ring->data[idx];
			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
				code = iwx_cmd_opcode(code);
		}

		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
			break;

		/*
		 * First MPDU in this buffer: replace m0 on the rx ring with
		 * a fresh buffer so the frame mbuf(s) can be handed up the
		 * stack without the hardware writing into them again.
		 */
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				break;
			}
			KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			/* XXX-THJ: I've not managed to hit this path in testing */
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			/* AX210 devices ship only one packet per Rx buffer. */
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
			    nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
				if (m == NULL) {
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
			}
			break;
		}

//		case IWX_BAR_FRAME_RELEASE:
//			iwx_rx_bar_frame_release(sc, pkt, ml);
//			break;
//
		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_BA_NOTIF:
			iwx_rx_compressed_ba(sc, pkt);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			iwx_rx_bmiss(sc, pkt, data);
			DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
			    __func__));
			ieee80211_beacon_miss(ic);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;
			struct iwx_alive_resp_v5 *resp5;
			struct iwx_alive_resp_v6 *resp6;

			DPRINTF(("%s: firmware alive\n", __func__));
			sc->sc_uc.uc_ok = 0;

			/*
			 * For v5 and above, we can check the version, for older
			 * versions we need to check the size.
			 */
			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 6) {
				SYNC_RESP_STRUCT(resp6, pkt);
				/* Size mismatch: wake the waiter, leave uc_ok=0. */
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp6)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp6->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp6->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp6->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp6->sku_id.data[2]);
				if (resp6->status == IWX_ALIVE_STATUS_OK) {
					sc->sc_uc.uc_ok = 1;
				}
			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 5) {
				SYNC_RESP_STRUCT(resp5, pkt);
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp5)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp5->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp5->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp5->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp5->sku_id.data[2]);
				if (resp5->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				/* Pre-v5 alive: identified by payload size only. */
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else
				/* NOTE(review): message lacks a '\n' and DEVNAME. */
				printf("unknown payload version");

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			ieee80211_restart_all(ic);
			break;
		}

		/*
		 * Generic command responses: if a synchronous command is
		 * waiting for this reply (sc_cmd_resp_pkt[idx] set by the
		 * sender), copy the packet into its response buffer.
		 */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_CMD):
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Failed or ill-sized reply: drop the response buffer. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			/* NOTE(review): second POSTREAD sync looks redundant. */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
			struct iwx_umac_scan_complete *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
			    notif->status));
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
			    __func__));
			struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
			    notif->status));
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Only act on the time event we are tracking. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_NOTIF): {
			struct iwx_session_prot_notif *notif;
			uint32_t status, start, conf_id;

			SYNC_RESP_STRUCT(notif, pkt);

			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			(void)notif;
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			/*
			 * NOTE(review): len_n_flags is compared without
			 * le32toh() here; fine on little-endian hosts --
			 * verify if big-endian targets matter.
			 */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	/* Free a replaced rx-ring mbuf that was never handed to the stack. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
9327
/*
 * Drain the rx ring: process every buffer the firmware has closed
 * since we last looked, then tell the hardware how far we got.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf m;	/* dummy; iwx_rx_pkt()'s 'ml' parameter is unused */
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Read the closed receive-buffer index written back by firmware. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9360
/*
 * Legacy INTx/ICT interrupt handler inherited from OpenBSD.  Compiled
 * out in this port; the active interrupt path is iwx_intr_msix() below.
 * Kept for reference in case non-MSI-X operation is ever needed.
 */
#if 0
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something. keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

out_ena:
	iwx_restore_interrupts(sc);
out:
	return rv;
}
#endif
9489
/*
 * MSI-X interrupt handler (single vector).  Reads and acknowledges the
 * FH (DMA) and HW interrupt cause registers, then dispatches: rx
 * notifications, firmware-chunk-load completion, fatal firmware/
 * hardware errors, rfkill changes and the firmware ALIVE event.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;	/* only one MSI-X vector is used */

	IWX_LOCK(sc);

	/* Fetch the pending causes and ack them by writing them back. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	/* Only consider causes we have enabled. */
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		/* Deliberately skip the automask re-enable below. */
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
out:
	IWX_UNLOCK(sc);
	return;
}
9571
9572 /*
9573 * The device info table below contains device-specific config overrides.
9574 * The most important parameter derived from this table is the name of the
9575 * firmware image to load.
9576 *
9577 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9578 * The "old" table matches devices based on PCI vendor/product IDs only.
9579 * The "new" table extends this with various device parameters derived
9580 * from MAC type, and RF type.
9581 *
9582 * In iwlwifi "old" and "new" tables share the same array, where "old"
9583 * entries contain dummy values for data defined only for "new" entries.
9584 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9587 * entries from "old" to "new" have at times been reverted due to regressions.
9588 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9589 * devices in the same driver.
9590 *
9591 * Our table below contains mostly "new" entries declared in iwlwifi
9592 * with the _IWL_DEV_INFO() macro (with a leading underscore).
9593 * Other devices are matched based on PCI vendor/product ID as usual,
9594 * unless matching specific PCI subsystem vendor/product IDs is required.
9595 *
9596 * Some "old"-style entries are required to identify the firmware image to use.
9597 * Others might be used to print a specific marketing name into Linux dmesg,
9598 * but we can't be sure whether the corresponding devices would be matched
9599 * correctly in the absence of their entries. So we include them just in case.
9600 */
9601
/*
 * One match entry of the device info table below.  Fields set to
 * IWX_CFG_ANY act as wildcards; all other fields must match the probed
 * device for the entry's 'cfg' to be selected.
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID, or IWX_CFG_ANY */
	uint16_t subdevice;	/* PCI subsystem device ID, or IWX_CFG_ANY */
	uint16_t mac_type;	/* MAC silicon type (IWX_CFG_MAC_TYPE_*) */
	uint16_t rf_type;	/* RF (radio) type (IWX_CFG_RF_TYPE_*) */
	uint8_t mac_step;	/* MAC silicon stepping (e.g. IWX_SILICON_C_STEP) */
	uint8_t rf_id;		/* RF ID variant (IWX_CFG_RF_ID_*) */
	uint8_t no_160;		/* 160 MHz selector (IWX_CFG_160/IWX_CFG_NO_160) */
	uint8_t cores;		/* core config (e.g. IWX_CFG_CORES_BT) */
	uint8_t cdb;		/* CDB selector (e.g. IWX_CFG_NO_CDB) */
	uint8_t jacket;		/* jacket package selector */
	const struct iwx_device_cfg *cfg;	/* config applied on match */
};
9615
/*
 * Build a fully-specified ("new"-style) match entry; counterpart of
 * Linux iwlwifi's _IWL_DEV_INFO() macro mentioned in the comment above.
 */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/*
 * Build an "old"-style entry which matches on PCI device/subdevice IDs
 * only; all other match fields are wildcards.
 */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9627
9628 /*
9629 * When adding entries to this table keep in mind that entries must
9630 * be listed in the same order as in the Linux driver. Code walks this
9631 * table backwards and uses the first matching entry it finds.
9632 * Device firmware must be available in fw_update(8).
9633 */
9634 static const struct iwx_dev_info iwx_dev_info_table[] = {
9635 /* So with HR */
9636 IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
9637 IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
9638 IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
9639 IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
9640 IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
9641 IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
9642 IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
9643 IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
9644 IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
9645 IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
9646 IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
9647 IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
9648 IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
9649 IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
9650 IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9651 IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9652 IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
9653 IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
9654 IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9655 IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9656 IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
9657 IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
9658 IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
9659 IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
9660 IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
9661 IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
9662 IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
9663 IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
9664 IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
9665 IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9666 IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9667 IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
9668 IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
9669 IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
9670 IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
9671 IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
9672
9673 /* So with GF2 */
9674 IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9675 IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9676 IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9677 IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9678 IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9679 IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9680 IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9681 IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9682 IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9683 IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9684 IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
9685 IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
9686
9687 /* Qu with Jf, C step */
9688 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9689 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9690 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9691 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9692 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
9693 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9694 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9695 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9696 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9697 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
9698 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9699 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9700 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9701 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9702 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
9703 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9704 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9705 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9706 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9707 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
9708 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9709 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9710 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9711 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9712 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
9713 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9714 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9715 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9716 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9717 IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
9718 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9719 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9720 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9721 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9722 IWX_CFG_ANY,
9723 iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
9724 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9725 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9726 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9727 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9728 IWX_CFG_ANY,
9729 iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
9730
9731 /* QuZ with Jf */
9732 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9733 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9734 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9735 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9736 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
9737 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9738 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9739 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9740 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9741 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
9742 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9743 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9744 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9745 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9746 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
9747 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9748 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9749 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9750 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9751 IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
9752 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9753 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9754 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9755 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9756 IWX_CFG_ANY,
9757 iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
9758 _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9759 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9760 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9761 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9762 IWX_CFG_ANY,
9763 iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
9764
9765 /* Qu with Hr, B step */
9766 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9767 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9768 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9769 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9770 iwx_qu_b0_hr1_b0), /* AX101 */
9771 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9772 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9773 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9774 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9775 iwx_qu_b0_hr_b0), /* AX203 */
9776
9777 /* Qu with Hr, C step */
9778 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9779 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9780 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9781 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9782 iwx_qu_c0_hr1_b0), /* AX101 */
9783 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9784 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9785 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9786 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9787 iwx_qu_c0_hr_b0), /* AX203 */
9788 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9789 IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9790 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9791 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9792 iwx_qu_c0_hr_b0), /* AX201 */
9793
9794 /* QuZ with Hr */
9795 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9796 IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9797 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9798 IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9799 iwx_quz_a0_hr1_b0), /* AX101 */
9800 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9801 IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
9802 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9803 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9804 iwx_cfg_quz_a0_hr_b0), /* AX203 */
9805
9806 /* SoF with JF2 */
9807 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9808 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9809 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9810 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9811 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
9812 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9813 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9814 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9815 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9816 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
9817
9818 /* SoF with JF */
9819 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9820 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9821 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9822 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9823 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
9824 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9825 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9826 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9827 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9828 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
9829 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9830 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9831 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9832 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9833 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
9834 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9835 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9836 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9837 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9838 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
9839
9840 /* So with Hr */
9841 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9842 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9843 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9844 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9845 iwx_cfg_so_a0_hr_b0), /* AX203 */
9846 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9847 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9848 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9849 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9850 iwx_cfg_so_a0_hr_b0), /* ax101 */
9851 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9852 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9853 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9854 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9855 iwx_cfg_so_a0_hr_b0), /* ax201 */
9856
9857 /* So-F with Hr */
9858 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9859 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9860 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9861 IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9862 iwx_cfg_so_a0_hr_b0), /* AX203 */
9863 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9864 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9865 IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9866 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9867 iwx_cfg_so_a0_hr_b0), /* AX101 */
9868 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9869 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9870 IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9871 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9872 iwx_cfg_so_a0_hr_b0), /* AX201 */
9873
9874 /* So-F with GF */
9875 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9876 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9877 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9878 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9879 iwx_2ax_cfg_so_gf_a0), /* AX211 */
9880 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9881 IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
9882 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9883 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
9884 iwx_2ax_cfg_so_gf4_a0), /* AX411 */
9885
9886 /* So with GF */
9887 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9888 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9889 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9890 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9891 iwx_2ax_cfg_so_gf_a0), /* AX211 */
9892 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9893 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9894 IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
9895 IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
9896 iwx_2ax_cfg_so_gf4_a0), /* AX411 */
9897
9898 /* So with JF2 */
9899 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9900 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9901 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9902 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9903 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
9904 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9905 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9906 IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9907 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9908 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
9909
9910 /* So with JF */
9911 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9912 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9913 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9914 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9915 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
9916 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9917 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9918 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9919 IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9920 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
9921 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9922 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9923 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9924 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9925 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
9926 _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9927 IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
9928 IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9929 IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9930 IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
9931 };
9932
9933 static int
9934 iwx_preinit(struct iwx_softc *sc)
9935 {
9936 struct ieee80211com *ic = &sc->sc_ic;
9937 int err;
9938
9939 err = iwx_prepare_card_hw(sc);
9940 if (err) {
9941 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9942 return err;
9943 }
9944
9945 if (sc->attached) {
9946 return 0;
9947 }
9948
9949 err = iwx_start_hw(sc);
9950 if (err) {
9951 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9952 return err;
9953 }
9954
9955 err = iwx_run_init_mvm_ucode(sc, 1);
9956 iwx_stop_device(sc);
9957 if (err) {
9958 printf("%s: failed to stop device\n", DEVNAME(sc));
9959 return err;
9960 }
9961
9962 /* Print version info and MAC address on first successful fw load. */
9963 sc->attached = 1;
9964 if (sc->sc_pnvm_ver) {
9965 printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
9966 "address %s\n",
9967 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9968 sc->sc_fwver, sc->sc_pnvm_ver,
9969 ether_sprintf(sc->sc_nvm.hw_addr));
9970 } else {
9971 printf("%s: hw rev 0x%x, fw %s, address %s\n",
9972 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9973 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9974 }
9975
9976 /* not all hardware can do 5GHz band */
9977 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9978 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9979 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9980
9981 return 0;
9982 }
9983
/*
 * Deferred attach, run from a config_intrhook once interrupts work.
 *
 * Boots the firmware via iwx_preinit() (which needs interrupts, hence
 * the hook), builds the channel map, attaches to net80211 and installs
 * the driver's ic_* methods.  The hook is disestablished on both the
 * success and failure paths.
 */
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/* Attach first, then override the methods net80211 installed. */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/* Save net80211's A-MPDU RX handlers so ours can chain to them. */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	/* Likewise for the ADDBA (TX aggregation) handlers. */
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10032
10033 const struct iwx_device_cfg *
10034 iwx_find_device_cfg(struct iwx_softc *sc)
10035 {
10036 uint16_t sdev_id, mac_type, rf_type;
10037 uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10038 int i;
10039
10040 sdev_id = pci_get_subdevice(sc->sc_dev);
10041 mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10042 mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10043 rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10044 cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10045 jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10046
10047 rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10048 no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10049 cores = IWX_SUBDEVICE_CORES(sdev_id);
10050
10051 for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10052 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10053
10054 if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10055 dev_info->device != sc->sc_pid)
10056 continue;
10057
10058 if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10059 dev_info->subdevice != sdev_id)
10060 continue;
10061
10062 if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10063 dev_info->mac_type != mac_type)
10064 continue;
10065
10066 if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10067 dev_info->mac_step != mac_step)
10068 continue;
10069
10070 if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10071 dev_info->rf_type != rf_type)
10072 continue;
10073
10074 if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10075 dev_info->cdb != cdb)
10076 continue;
10077
10078 if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10079 dev_info->jacket != jacket)
10080 continue;
10081
10082 if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10083 dev_info->rf_id != rf_id)
10084 continue;
10085
10086 if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10087 dev_info->no_160 != no_160)
10088 continue;
10089
10090 if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10091 dev_info->cores != cores)
10092 continue;
10093
10094 return dev_info->cfg;
10095 }
10096
10097 return NULL;
10098 }
10099
10100 static int
10101 iwx_probe(device_t dev)
10102 {
10103 int i;
10104
10105 for (i = 0; i < nitems(iwx_devices); i++) {
10106 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10107 pci_get_device(dev) == iwx_devices[i].device) {
10108 device_set_desc(dev, iwx_devices[i].name);
10109
10110 /*
10111 * Due to significant existing deployments using
10112 * iwlwifi lower the priority of iwx.
10113 *
10114 * This inverts the advice in bus.h where drivers
10115 * supporting newer hardware should return
10116 * BUS_PROBE_DEFAULT and drivers for older devices
10117 * return BUS_PROBE_LOW_PRIORITY.
10118 *
10119 */
10120 return (BUS_PROBE_LOW_PRIORITY);
10121 }
10122 }
10123
10124 return (ENXIO);
10125 }
10126
/*
 * Device attach: allocate software state, map PCI resources, set up
 * the MSI-X interrupt, select firmware per adapter type, allocate the
 * DMA areas and TX/RX rings, populate the net80211 capability fields,
 * and schedule iwx_attach_hook() to finish once interrupts work.
 *
 * Returns 0 on success or ENXIO on failure.
 *
 * NOTE(review): the early error returns (taskqueue, PCIe cap, MSI-X,
 * BAR, IRQ setup, unknown adapter) do not release resources already
 * acquired (lock, taskqueue, mapped BAR, MSI-X vector) — confirm
 * whether these paths can leak and need detach-style cleanup.
 */
static int
iwx_attach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwx_device_cfg *cfg;
	int err;
	int txq_i, i, j;
	size_t ctxt_info_size;
	int rid;
	int count;
	int error;
	sc->sc_dev = dev;
	sc->sc_pid = pci_get_device(dev);
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	/* Locks, send queue, deferred tasks and the driver taskqueue. */
	/* NOTE(review): taskqueue name "iwm_taskq" looks copied from
	 * iwm(4); presumably this should say "iwx" — confirm. */
	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
	IWX_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		return (ENXIO);
	}

	/* The driver requires a PCI Express device. */
	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (sc->sc_cap_off == 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (ENXIO);
	}

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Only MSI-X interrupt delivery is supported. */
	if (pci_msix_count(dev)) {
		sc->sc_msix = 1;
	} else {
		device_printf(dev, "no MSI-X found\n");
		return (ENXIO);
	}

	/* Map the device registers (BAR 0). */
	pci_enable_busmaster(dev);
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Allocate one MSI-X vector; fall back to a shared legacy IRQ. */
	count = 1;
	rid = 0;
	if (pci_alloc_msix(dev, &count) == 0)
		rid = 1;
	DPRINTF(("%s: count=%d\n", __func__, count));
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
	}

	/* Clear pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);

	/* Read hardware revision and RF ID for config table matching. */
	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
	DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
	    (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	/* Per-device defaults: firmware image, family, SoC parameters. */
	switch (sc->sc_pid) {
	case PCI_PRODUCT_INTEL_WL_22500_1:
		sc->sc_fwname = IWX_CC_A_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_2:
	case PCI_PRODUCT_INTEL_WL_22500_5:
		/* These devices should be QuZ only. */
		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
			device_printf(dev, "unsupported AX201 adapter\n");
			return (ENXIO);
		}
		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_3:
		/* Firmware image depends on the MAC silicon step. */
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_4:
	case PCI_PRODUCT_INTEL_WL_22500_7:
	case PCI_PRODUCT_INTEL_WL_22500_8:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 1820;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_6:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_9:
	case PCI_PRODUCT_INTEL_WL_22500_10:
	case PCI_PRODUCT_INTEL_WL_22500_11:
	case PCI_PRODUCT_INTEL_WL_22500_13:
	/* _14 is an MA device, not yet supported */
	case PCI_PRODUCT_INTEL_WL_22500_15:
	case PCI_PRODUCT_INTEL_WL_22500_16:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_12:
	case PCI_PRODUCT_INTEL_WL_22500_17:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		sc->sc_imr_enabled = 1;
		break;
	default:
		device_printf(dev, "unknown adapter type\n");
		return (ENXIO);
	}

	/* A config-table match refines the defaults chosen above. */
	cfg = iwx_find_device_cfg(sc);
	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
	if (cfg) {
		sc->sc_fwname = cfg->fw_name;
		sc->sc_pnvm_name = cfg->pnvm_name;
		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
		sc->sc_uhb_supported = cfg->uhb_supported;
		if (cfg->xtal_latency) {
			sc->sc_xtal_latency = cfg->xtal_latency;
			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
		}
	}

	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		sc->sc_umac_prph_offset = 0x300000;
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
	} else
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;

	/* Allocate DMA memory for loading firmware. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
	else
		ctxt_info_size = sizeof(struct iwx_context_info);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    ctxt_info_size, 1);
	if (err) {
		device_printf(dev,
		    "could not allocate memory for loading firmware\n");
		return (ENXIO);
	}

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
		    sizeof(struct iwx_prph_scratch), 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph scratch memory\n");
			goto fail1;
		}

		/*
		 * Allocate prph information. The driver doesn't use this.
		 * We use the second half of this page to give the device
		 * some dummy TR/CR tail pointers - which shouldn't be
		 * necessary as we don't use this, but the hardware still
		 * reads/writes there and we can't let it go do that with
		 * a NULL pointer.
		 */
		KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
		    ("iwx_prph_info has wrong size"));
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
		    PAGE_SIZE, 1);
		if (err) {
			device_printf(dev,
			    "could not allocate prph info memory\n");
			goto fail1;
		}
	}

	/* Allocate interrupt cause table (ICT).*/
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
	if (err) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail1;
	}

	/* TX rings; on failure fail4 frees the rings allocated so far. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			device_printf(dev, "could not allocate TX ring %d\n",
			    txq_i);
			goto fail4;
		}
	}

	err = iwx_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		device_printf(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

#ifdef IWX_DEBUG
	/* Debug sysctls: debug bitmask, queue watermarks and occupancy. */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
	    CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
	    CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
	    CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
	    CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
	    CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
	    CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
	    CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
	    CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
	    CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
	    CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
	    CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
#endif
	/* net80211 state and capability advertisement. */
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_MONITOR |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
	    | IEEE80211_C_BGSCAN	/* capable of bg scanning */
	    ;
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	/* Enable seqno offload */
	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
	/* Don't send null data frames; let firmware do it */
	ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;

	ic->ic_txstream = 2;
	ic->ic_rxstream = 2;
	ic->ic_htcaps |= IEEE80211_HTC_HT
	    | IEEE80211_HTCAP_SMPS_OFF
	    | IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
	    | IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
	    | IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
	    | IEEE80211_HTC_AMPDU	/* tx A-MPDU */
//	    | IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
	    | IEEE80211_HTCAP_MAXAMSDU_3839;	/* max A-MSDU length */

	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;

	/*
	 * XXX: setupcurchan() expects vhtcaps to be non-zero
	 * https://bugs.freebsd.org/274156
	 */
	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
	    | IEEE80211_VHTCAP_SHORT_GI_80
	    | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
	    | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
	    | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;

	/* Advertise MCS 0-9 on the two supported spatial streams only. */
	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
	    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);

	/* Initialize RX block-ack reorder state. */
	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		for (j = 0; j < nitems(rxba->entries); j++)
			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
	}

	/* Finish attachment (firmware boot) once interrupts are enabled. */
	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev,
		    "config_intrhook_establish failed\n");
		goto fail4;
	}

	return (0);

fail4:
	while (--txq_i >= 0)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);
	if (sc->ict_dma.vaddr != NULL)
		iwx_dma_contig_free(&sc->ict_dma);

fail1:
	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_dma_contig_free(&sc->prph_scratch_dma);
	iwx_dma_contig_free(&sc->prph_info_dma);
	return (ENXIO);
}
10548
/*
 * Device detach: stop the hardware, drain and free the taskqueue,
 * detach from net80211, then release rings, firmware images, interrupt
 * and bus resources in reverse order of acquisition.
 *
 * NOTE(review): the ctxt_info/prph_scratch/prph_info/ict DMA areas
 * allocated in iwx_attach() do not appear to be freed here — confirm
 * whether they are released elsewhere or leak on detach.
 */
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	iwx_stop_device(sc);

	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Release the firmware and PNVM images if they were loaded. */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10592
/*
 * Attach the radiotap (packet capture) headers for TX and RX so
 * monitoring tools see the per-frame radio metadata this driver fills
 * in (sc_txtap/sc_rxtap).
 */
static void
iwx_radiotap_attach(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
	    "->%s begin\n", __func__);

	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWX_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWX_RX_RADIOTAP_PRESENT);

	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
	    "->%s end\n", __func__);
}
10610
10611 struct ieee80211vap *
10612 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10613 enum ieee80211_opmode opmode, int flags,
10614 const uint8_t bssid[IEEE80211_ADDR_LEN],
10615 const uint8_t mac[IEEE80211_ADDR_LEN])
10616 {
10617 struct iwx_vap *ivp;
10618 struct ieee80211vap *vap;
10619
10620 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
10621 return NULL;
10622 ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10623 vap = &ivp->iv_vap;
10624 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10625 vap->iv_bmissthreshold = 10; /* override default */
10626 /* Override with driver methods. */
10627 ivp->iv_newstate = vap->iv_newstate;
10628 vap->iv_newstate = iwx_newstate;
10629
10630 ivp->id = IWX_DEFAULT_MACID;
10631 ivp->color = IWX_DEFAULT_COLOR;
10632
10633 ivp->have_wme = TRUE;
10634 ivp->ps_disabled = FALSE;
10635
10636 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10637 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10638
10639 /* h/w crypto support */
10640 vap->iv_key_alloc = iwx_key_alloc;
10641 vap->iv_key_delete = iwx_key_delete;
10642 vap->iv_key_set = iwx_key_set;
10643 vap->iv_key_update_begin = iwx_key_update_begin;
10644 vap->iv_key_update_end = iwx_key_update_end;
10645
10646 ieee80211_ratectl_init(vap);
10647 /* Complete setup. */
10648 ieee80211_vap_attach(vap, ieee80211_media_change,
10649 ieee80211_media_status, mac);
10650 ic->ic_opmode = opmode;
10651
10652 return vap;
10653 }
10654
/*
 * Destroy a VAP created by iwx_vap_create(): deinitialize rate
 * control, detach from net80211, then free the driver's VAP wrapper.
 */
static void
iwx_vap_delete(struct ieee80211vap *vap)
{
	struct iwx_vap *ivp = IWX_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}
10664
10665 static void
10666 iwx_parent(struct ieee80211com *ic)
10667 {
10668 struct iwx_softc *sc = ic->ic_softc;
10669 IWX_LOCK(sc);
10670
10671 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10672 iwx_stop(sc);
10673 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10674 } else {
10675 iwx_init(sc);
10676 ieee80211_start_all(ic);
10677 }
10678 IWX_UNLOCK(sc);
10679 }
10680
10681 static int
10682 iwx_suspend(device_t dev)
10683 {
10684 struct iwx_softc *sc = device_get_softc(dev);
10685 struct ieee80211com *ic = &sc->sc_ic;
10686
10687 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10688 ieee80211_suspend_all(ic);
10689
10690 iwx_stop(sc);
10691 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10692 }
10693 return (0);
10694 }
10695
/*
 * Device resume: restore the PCI retry-timeout workaround, bring the
 * hardware back up under the driver lock, then let net80211 resume all
 * vaps.
 *
 * Returns 0 on success or the error from iwx_init(); on init failure
 * the device is stopped again so it is left quiesced.
 */
static int
iwx_resume(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	IWX_LOCK(sc);

	err = iwx_init(sc);
	if (err) {
		/* Leave the device in a known-off state on failure. */
		iwx_stop_device(sc);
		IWX_UNLOCK(sc);
		return err;
	}

	IWX_UNLOCK(sc);

	ieee80211_resume_all(ic);
	return (0);
}
10723
10724 static void
10725 iwx_scan_start(struct ieee80211com *ic)
10726 {
10727 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10728 struct iwx_softc *sc = ic->ic_softc;
10729 int err;
10730
10731 IWX_LOCK(sc);
10732 if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10733 err = iwx_scan(sc);
10734 else
10735 err = iwx_bgscan(ic);
10736 IWX_UNLOCK(sc);
10737 if (err)
10738 ieee80211_cancel_scan(vap);
10739
10740 return;
10741 }
10742
/* No-op: multicast filter updates are not pushed to the firmware. */
static void
iwx_update_mcast(struct ieee80211com *ic)
{
}
10747
/*
 * No-op: scans are driven by the firmware (see iwx_scan_start), so
 * net80211's per-channel dwell callback takes no driver action.
 */
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
10752
/*
 * No-op: minimum-dwell notifications are irrelevant for
 * firmware-driven scans (see iwx_scan_start).
 */
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
10757
/* net80211 scan-end callback: tell the driver/firmware to end the scan. */
static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}
10763
/*
 * Channel-change callback.  Currently a no-op: the disabled code below
 * sketches a phy-context update that is not yet implemented.
 */
static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}
10775
10776 static void
10777 iwx_endscan_cb(void *arg, int pending)
10778 {
10779 struct iwx_softc *sc = arg;
10780 struct ieee80211com *ic = &sc->sc_ic;
10781
10782 DPRINTF(("scan ended\n"));
10783 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10784 }
10785
/*
 * WME parameter updates are not forwarded to the firmware here;
 * report success so net80211 proceeds.
 */
static int
iwx_wme_update(struct ieee80211com *ic)
{
	return 0;
}
10791
10792 static int
10793 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10794 const struct ieee80211_bpf_params *params)
10795 {
10796 struct ieee80211com *ic = ni->ni_ic;
10797 struct iwx_softc *sc = ic->ic_softc;
10798 int err;
10799
10800 IWX_LOCK(sc);
10801 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10802 err = iwx_tx(sc, m, ni);
10803 IWX_UNLOCK(sc);
10804 return err;
10805 } else {
10806 IWX_UNLOCK(sc);
10807 return EIO;
10808 }
10809 }
10810
10811 static int
10812 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10813 {
10814 struct iwx_softc *sc = ic->ic_softc;
10815 int error;
10816
10817 // TODO: mbufq_enqueue in iwm
10818 // TODO dequeue in iwm_start, counters, locking
10819 IWX_LOCK(sc);
10820 error = mbufq_enqueue(&sc->sc_snd, m);
10821 if (error) {
10822 IWX_UNLOCK(sc);
10823 return (error);
10824 }
10825
10826 iwx_start(sc);
10827 IWX_UNLOCK(sc);
10828 return (0);
10829 }
10830
10831 static int
10832 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10833 int baparamset, int batimeout, int baseqctl)
10834 {
10835 struct ieee80211com *ic = ni->ni_ic;
10836 struct iwx_softc *sc = ic->ic_softc;
10837 int tid;
10838
10839 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10840 sc->ni_rx_ba[tid].ba_winstart =
10841 _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10842 sc->ni_rx_ba[tid].ba_winsize =
10843 _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10844 sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10845
10846 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10847 tid >= IWX_MAX_TID_COUNT)
10848 return ENOSPC;
10849
10850 if (sc->ba_rx.start_tidmask & (1 << tid)) {
10851 DPRINTF(("%s: tid %d already added\n", __func__, tid));
10852 return EBUSY;
10853 }
10854 DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10855
10856 sc->ba_rx.start_tidmask |= (1 << tid);
10857 DPRINTF(("%s: tid=%i\n", __func__, tid));
10858 DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10859 DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10860 DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10861
10862 taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10863
10864 // TODO:misha move to ba_task (serialize)
10865 sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10866
10867 return (0);
10868 }
10869
/* Rx block-ack teardown: nothing is forwarded to the firmware here. */
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	return;
}
10875
/*
 * net80211 ADDBA-request (tx aggregation) callback: mark the TID as
 * pending and let ba_tx_task perform the firmware work asynchronously.
 */
static int
iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct iwx_softc *sc = ni->ni_ic->ic_softc;
	int tid;

	/*
	 * NOTE(review): baparamset arrives from net80211 already in host
	 * order, so this le16toh() is a no-op only on little-endian --
	 * confirm on big-endian targets.
	 */
	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
	DPRINTF(("%s: tid=%i\n", __func__, tid));
	sc->ba_tx.start_tidmask |= (1 << tid);
	taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
	return 0;
}
10889
10890
/*
 * ADDBA response callback: no driver-side action; always reports 0.
 * NOTE(review): the previous net80211 handler is not chained here --
 * confirm tx-aggregation state is updated elsewhere (ba_tx_task).
 */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return 0;
}
10897
/* No-op: iwx_key_set() runs synchronously, so no bracketing is needed. */
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	return;
}
10903
/* No-op: counterpart of iwx_key_update_begin(); nothing to flush. */
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	return;
}
10909
/*
 * net80211 key-slot allocation callback (boolean convention: nonzero
 * means success, with the chosen slot written to *keyix/*rxkeyix).
 */
static int
iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{

	/*
	 * NOTE(review): this early-success path returns without writing
	 * *keyix/*rxkeyix -- confirm net80211 pre-initializes them for
	 * CCM keys, otherwise the caller sees indeterminate indices.
	 */
	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
		return 1;
	}
	if (!(&vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
		/*
		 * Not in the global key table, the driver should handle this
		 * by allocating a slot in the h/w key table/cache. In
		 * lieu of that return key slot 0 for any unicast key
		 * request. We disallow the request if this is a group key.
		 * This default policy does the right thing for legacy hardware
		 * with a 4 key table. It also handles devices that pass
		 * packets through untouched when marked with the WEP bit
		 * and key index 0.
		 */
		if (k->wk_flags & IEEE80211_KEY_GROUP)
			return 0;
		*keyix = 0;	/* NB: use key index 0 for ucast key */
	} else {
		/* Global key: slot is its position in the WEP key array. */
		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
	}
	*rxkeyix = IEEE80211_KEYIX_NONE;	/* XXX maybe *keyix? */
	return 1;
}
10939
10940 static int
10941 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
10942 {
10943 struct ieee80211com *ic = vap->iv_ic;
10944 struct iwx_softc *sc = ic->ic_softc;
10945 struct iwx_add_sta_key_cmd cmd;
10946 uint32_t status;
10947 int err;
10948 int id;
10949
10950 if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
10951 return 1;
10952 }
10953
10954 IWX_LOCK(sc);
10955 /*
10956 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
10957 * Currently we only implement station mode where 'ni' is always
10958 * ic->ic_bss so there is no need to validate arguments beyond this:
10959 */
10960
10961 memset(&cmd, 0, sizeof(cmd));
10962
10963 if (k->wk_flags & IEEE80211_KEY_GROUP) {
10964 DPRINTF(("%s: adding group key\n", __func__));
10965 } else {
10966 DPRINTF(("%s: adding key\n", __func__));
10967 }
10968 if (k >= &vap->iv_nw_keys[0] &&
10969 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
10970 id = (k - vap->iv_nw_keys);
10971 else
10972 id = (0);
10973 DPRINTF(("%s: setting keyid=%i\n", __func__, id));
10974 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
10975 IWX_STA_KEY_FLG_WEP_KEY_MAP |
10976 ((id << IWX_STA_KEY_FLG_KEYID_POS) &
10977 IWX_STA_KEY_FLG_KEYID_MSK));
10978 if (k->wk_flags & IEEE80211_KEY_GROUP) {
10979 cmd.common.key_offset = 1;
10980 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
10981 } else {
10982 cmd.common.key_offset = 0;
10983 }
10984 memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
10985 k->wk_keylen));
10986 DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
10987 for (int i=0; i<k->wk_keylen; i++) {
10988 DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
10989 }
10990 cmd.common.sta_id = IWX_STATION_ID;
10991
10992 cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
10993 DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));
10994
10995 status = IWX_ADD_STA_SUCCESS;
10996 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
10997 &status);
10998 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
10999 err = EIO;
11000 if (err) {
11001 printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
11002 IWX_UNLOCK(sc);
11003 return err;
11004 } else
11005 DPRINTF(("%s: key added successfully\n", __func__));
11006 IWX_UNLOCK(sc);
11007 return 1;
11008 }
11009
/*
 * Key deletion: no firmware-side removal is performed; report success
 * (nonzero) so net80211 drops the key from its own table.
 */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return 1;
}
11015
/* Newbus device-interface method table for the iwx PCI driver. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, iwx_probe),
	DEVMETHOD(device_attach, iwx_attach),
	DEVMETHOD(device_detach, iwx_detach),
	DEVMETHOD(device_suspend, iwx_suspend),
	DEVMETHOD(device_resume, iwx_resume),

	DEVMETHOD_END
};

/* Driver declaration: name, methods, and per-device softc size. */
static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};

/* Register on the PCI bus, export PNP match data, declare dependencies. */
DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11039