1 /*-
2 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3 */
4
5 /* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
6
7 /*
8 *
9 * Copyright (c) 2025 The FreeBSD Foundation
10 *
11 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12 * under sponsorship from the FreeBSD Foundation.
13 *
14 * Permission to use, copy, modify, and distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
26 */
27
28 /*-
29 * Copyright (c) 2024 Future Crew, LLC
30 * Author: Mikhail Pchelin <misha@FreeBSD.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45 /*
46 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47 * Author: Stefan Sperling <stsp@openbsd.org>
48 * Copyright (c) 2014 Fixup Software Ltd.
49 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50 *
51 * Permission to use, copy, modify, and distribute this software for any
52 * purpose with or without fee is hereby granted, provided that the above
53 * copyright notice and this permission notice appear in all copies.
54 *
55 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62 */
63
64 /*-
65 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66 * which were used as the reference documentation for this implementation.
67 *
68 ******************************************************************************
69 *
70 * This file is provided under a dual BSD/GPLv2 license. When using or
71 * redistributing this file, you may do so under either license.
72 *
73 * GPL LICENSE SUMMARY
74 *
75 * Copyright(c) 2017 Intel Deutschland GmbH
76 * Copyright(c) 2018 - 2019 Intel Corporation
77 *
78 * This program is free software; you can redistribute it and/or modify
79 * it under the terms of version 2 of the GNU General Public License as
80 * published by the Free Software Foundation.
81 *
82 * This program is distributed in the hope that it will be useful, but
83 * WITHOUT ANY WARRANTY; without even the implied warranty of
84 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
85 * General Public License for more details.
86 *
87 * BSD LICENSE
88 *
89 * Copyright(c) 2017 Intel Deutschland GmbH
90 * Copyright(c) 2018 - 2019 Intel Corporation
91 * All rights reserved.
92 *
93 * Redistribution and use in source and binary forms, with or without
94 * modification, are permitted provided that the following conditions
95 * are met:
96 *
97 * * Redistributions of source code must retain the above copyright
98 * notice, this list of conditions and the following disclaimer.
99 * * Redistributions in binary form must reproduce the above copyright
100 * notice, this list of conditions and the following disclaimer in
101 * the documentation and/or other materials provided with the
102 * distribution.
103 * * Neither the name Intel Corporation nor the names of its
104 * contributors may be used to endorse or promote products derived
105 * from this software without specific prior written permission.
106 *
107 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118 *
119 *****************************************************************************
120 */
121
122 /*-
123 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124 *
125 * Permission to use, copy, modify, and distribute this software for any
126 * purpose with or without fee is hereby granted, provided that the above
127 * copyright notice and this permission notice appear in all copies.
128 *
129 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136 */
137
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180
/*
 * Tx queue high/low watermarks (frame counts).  NOTE(review): presumably
 * used for tx-ring flow control (stop filling above himark, resume below
 * lomark) -- confirm against the users later in this file.
 */
int iwx_himark = 224;
int iwx_lomark = 192;

/* Firmware response structure versions handled by this FreeBSD port. */
#define IWX_FBSD_RSP_V3 3
#define IWX_FBSD_RSP_V4 4

/* Device name for log messages, e.g. "iwx0". */
#define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
/* Map an ieee80211com to the ifnet of its first (only) vap. */
#define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)

/*
 * Linux-style helpers: load a little-endian scalar through a pointer.
 * NOTE(review): the casts assume the pointer is suitably aligned for the
 * target type -- callers must guarantee this on strict-alignment machines.
 */
#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195
196 #include <dev/iwx/if_iwx_debug.h>
197
198 #define PCI_CFG_RETRY_TIMEOUT 0x41
199
200 #define PCI_VENDOR_INTEL 0x8086
201 #define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
202 #define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
203 #define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
204 #define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
205 #define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
206 #define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
207 #define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
208 #define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
209 #define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
210 #define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
211 #define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
212 #define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
213 #define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
214 #define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
215 #define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
216 #define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
217 #define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
218
219 static const struct iwx_devices {
220 uint16_t device;
221 char *name;
222 } iwx_devices[] = {
223 { PCI_PRODUCT_INTEL_WL_22500_1, "Wi-Fi 6 AX200" },
224 { PCI_PRODUCT_INTEL_WL_22500_2, "Wi-Fi 6 AX201" },
225 { PCI_PRODUCT_INTEL_WL_22500_3, "Wi-Fi 6 AX201" },
226 { PCI_PRODUCT_INTEL_WL_22500_4, "Wi-Fi 6 AX201" },
227 { PCI_PRODUCT_INTEL_WL_22500_5, "Wi-Fi 6 AX201" },
228 { PCI_PRODUCT_INTEL_WL_22500_6, "Wi-Fi 6 AX201" },
229 { PCI_PRODUCT_INTEL_WL_22500_7, "Wi-Fi 6 AX201" },
230 { PCI_PRODUCT_INTEL_WL_22500_8, "Wi-Fi 6 AX201" },
231 { PCI_PRODUCT_INTEL_WL_22500_9, "Wi-Fi 6 AX210" },
232 { PCI_PRODUCT_INTEL_WL_22500_10, "Wi-Fi 6 AX211" },
233 { PCI_PRODUCT_INTEL_WL_22500_11, "Wi-Fi 6 AX211" },
234 { PCI_PRODUCT_INTEL_WL_22500_12, "Wi-Fi 6 AX211" },
235 { PCI_PRODUCT_INTEL_WL_22500_13, "Wi-Fi 6 AX211" },
236 { PCI_PRODUCT_INTEL_WL_22500_14, "Wi-Fi 6 AX210" },
237 { PCI_PRODUCT_INTEL_WL_22500_15, "Wi-Fi 6 AX211" },
238 { PCI_PRODUCT_INTEL_WL_22500_16, "Wi-Fi 6 AX211" },
239 { PCI_PRODUCT_INTEL_WL_22500_17, "Wi-Fi 6 AX211" },
240 };
241
/*
 * Channel numbers the NVM can enable, in NVM bitmap order, for non-UHB
 * (no 6 GHz) devices.  The first IWX_NUM_2GHZ_CHANNELS entries are
 * 2.4 GHz channels; the remainder are 5 GHz channels.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/*
 * As above, but for ultra-high-band (UHB) capable devices, which add the
 * 6-7 GHz channels after the 2.4 and 5 GHz ones.  Note the 6 GHz channel
 * numbers restart from 1 and are distinguished by their array position.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

/* Counts of the 2.4 GHz and 5 GHz entries in the tables above. */
#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37
267
/*
 * Rate table mapping net80211 rates to firmware PLCP values.
 * "rate" is in units of 500 kb/s (so 2 == 1 Mb/s, 108 == 54 Mb/s); HT
 * (MCS) entries that have no legacy equivalent use IWX_RATE_INVM_PLCP in
 * the legacy column, and legacy-only entries use the HT "invalid" PLCP.
 */
const struct iwx_rate {
	uint16_t rate;		/* net80211 rate, 500 kb/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP value */
	uint8_t ht_plcp;	/* HT PLCP value */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
/* Index of the first CCK entry, first OFDM entry, and the last entry. */
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
/* OFDM rates are >= 6 Mb/s; 22 (11 Mb/s) is the one CCK rate above that. */
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
322
323 static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
324 static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
325 static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
326 #if 0
327 static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
328 static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
329 #endif
330 static int iwx_apply_debug_destination(struct iwx_softc *);
331 static void iwx_set_ltr(struct iwx_softc *);
332 static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
333 static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
334 const struct iwx_fw_sects *);
335 static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
336 static void iwx_ctxt_info_free_paging(struct iwx_softc *);
337 static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
338 struct iwx_context_info_dram *);
339 static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
340 static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
341 const uint8_t *, size_t);
342 static int iwx_set_default_calib(struct iwx_softc *, const void *);
343 static void iwx_fw_info_free(struct iwx_fw_info *);
344 static int iwx_read_firmware(struct iwx_softc *);
345 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
346 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
347 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
348 static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
349 static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
351 static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
352 static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
353 static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
354 static int iwx_nic_lock(struct iwx_softc *);
355 static void iwx_nic_assert_locked(struct iwx_softc *);
356 static void iwx_nic_unlock(struct iwx_softc *);
357 static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
358 uint32_t);
359 static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
360 static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
361 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
362 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
363 bus_size_t, bus_size_t);
364 static void iwx_dma_contig_free(struct iwx_dma_info *);
365 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void iwx_disable_rx_dma(struct iwx_softc *);
367 static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
368 static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
369 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
370 static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
371 static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
372 static void iwx_enable_rfkill_int(struct iwx_softc *);
373 static int iwx_check_rfkill(struct iwx_softc *);
374 static void iwx_enable_interrupts(struct iwx_softc *);
375 static void iwx_enable_fwload_interrupt(struct iwx_softc *);
376 #if 0
377 static void iwx_restore_interrupts(struct iwx_softc *);
378 #endif
379 static void iwx_disable_interrupts(struct iwx_softc *);
380 static void iwx_ict_reset(struct iwx_softc *);
381 static int iwx_set_hw_ready(struct iwx_softc *);
382 static int iwx_prepare_card_hw(struct iwx_softc *);
383 static int iwx_force_power_gating(struct iwx_softc *);
384 static void iwx_apm_config(struct iwx_softc *);
385 static int iwx_apm_init(struct iwx_softc *);
386 static void iwx_apm_stop(struct iwx_softc *);
387 static int iwx_allow_mcast(struct iwx_softc *);
388 static void iwx_init_msix_hw(struct iwx_softc *);
389 static void iwx_conf_msix_hw(struct iwx_softc *, int);
390 static int iwx_clear_persistence_bit(struct iwx_softc *);
391 static int iwx_start_hw(struct iwx_softc *);
392 static void iwx_stop_device(struct iwx_softc *);
393 static void iwx_nic_config(struct iwx_softc *);
394 static int iwx_nic_rx_init(struct iwx_softc *);
395 static int iwx_nic_init(struct iwx_softc *);
396 static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
397 static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
398 static void iwx_post_alive(struct iwx_softc *);
399 static int iwx_schedule_session_protection(struct iwx_softc *,
400 struct iwx_node *, uint32_t);
401 static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
402 static void iwx_init_channel_map(struct ieee80211com *, int, int *,
403 struct ieee80211_channel[]);
404 static int iwx_mimo_enabled(struct iwx_softc *);
405 static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
406 uint16_t);
407 static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
408 static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
409 uint16_t, uint16_t, int, int);
410 static void iwx_sta_tx_agg_start(struct iwx_softc *,
411 struct ieee80211_node *, uint8_t);
412 static void iwx_ba_rx_task(void *, int);
413 static void iwx_ba_tx_task(void *, int);
414 static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
415 static int iwx_is_valid_mac_addr(const uint8_t *);
416 static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
417 static int iwx_nvm_get(struct iwx_softc *);
418 static int iwx_load_firmware(struct iwx_softc *);
419 static int iwx_start_fw(struct iwx_softc *);
420 static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
421 static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
422 static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
423 static int iwx_load_pnvm(struct iwx_softc *);
424 static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
425 static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
426 static int iwx_load_ucode_wait_alive(struct iwx_softc *);
427 static int iwx_send_dqa_cmd(struct iwx_softc *);
428 static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
429 static int iwx_config_ltr(struct iwx_softc *);
430 static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
431 static int iwx_rx_addbuf(struct iwx_softc *, int, int);
432 static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
433 static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
434 struct iwx_rx_data *);
435 static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
436 static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
437 #if 0
438 int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
439 struct ieee80211_node *, struct ieee80211_rxinfo *);
440 #endif
441 static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
442 int, int, uint32_t, uint8_t);
443 static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
444 static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
445 struct iwx_tx_data *);
446 static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
447 static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
448 struct iwx_rx_data *);
449 static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
450 static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
451 struct iwx_rx_data *);
452 static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
453 static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
454 static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
455 struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
456 #if 0
457 static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
458 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
459 #endif
460 static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
461 uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
462 static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
463 static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
464 const void *);
465 static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
466 uint32_t *);
467 static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
468 const void *, uint32_t *);
469 static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
470 static void iwx_cmd_done(struct iwx_softc *, int, int, int);
471 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
472 static uint32_t iwx_fw_rateidx_cck(uint8_t);
473 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
474 struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
475 struct mbuf *);
476 static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
477 uint16_t, uint16_t);
478 static int iwx_tx(struct iwx_softc *, struct mbuf *,
479 struct ieee80211_node *);
480 static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
481 static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
482 static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
483 static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
484 struct iwx_beacon_filter_cmd *);
485 static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
486 int);
487 static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
488 struct iwx_mac_power_cmd *);
489 static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
490 static int iwx_power_update_device(struct iwx_softc *);
491 #if 0
492 static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
493 #endif
494 static int iwx_disable_beacon_filter(struct iwx_softc *);
495 static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
496 static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
497 static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
498 static int iwx_fill_probe_req(struct iwx_softc *,
499 struct iwx_scan_probe_req *);
500 static int iwx_config_umac_scan_reduced(struct iwx_softc *);
501 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
502 static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
503 struct iwx_scan_general_params_v10 *, int);
504 static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
505 struct iwx_scan_general_params_v10 *, uint16_t, int);
506 static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
507 struct iwx_scan_channel_params_v6 *, uint32_t, int);
508 static int iwx_umac_scan_v14(struct iwx_softc *, int);
509 static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
510 static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
511 static int iwx_rval2ridx(int);
512 static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
513 int *);
514 static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
515 struct iwx_mac_ctx_cmd *, uint32_t);
516 static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
517 struct iwx_mac_data_sta *, int);
518 static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
519 uint32_t, int);
520 static int iwx_clear_statistics(struct iwx_softc *);
521 static int iwx_scan(struct iwx_softc *);
522 static int iwx_bgscan(struct ieee80211com *);
523 static int iwx_enable_mgmt_queue(struct iwx_softc *);
524 static int iwx_disable_mgmt_queue(struct iwx_softc *);
525 static int iwx_rs_rval2idx(uint8_t);
526 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
527 int);
528 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
529 static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
530 static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
531 static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
532 static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
533 uint8_t, uint8_t);
534 static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
535 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
536 uint8_t);
537 static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
538 static int iwx_deauth(struct iwx_softc *);
539 static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
540 static int iwx_run_stop(struct iwx_softc *);
541 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
542 const uint8_t[IEEE80211_ADDR_LEN]);
543 #if 0
544 int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
545 struct ieee80211_key *);
546 void iwx_setkey_task(void *);
547 void iwx_delete_key(struct ieee80211com *,
548 struct ieee80211_node *, struct ieee80211_key *);
549 #endif
550 static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
551 static void iwx_endscan(struct iwx_softc *);
552 static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
553 struct ieee80211_node *);
554 static int iwx_sf_config(struct iwx_softc *, int);
555 static int iwx_send_bt_init_conf(struct iwx_softc *);
556 static int iwx_send_soc_conf(struct iwx_softc *);
557 static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
558 static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
559 static int iwx_init_hw(struct iwx_softc *);
560 static int iwx_init(struct iwx_softc *);
561 static void iwx_stop(struct iwx_softc *);
562 static void iwx_watchdog(void *);
563 static const char *iwx_desc_lookup(uint32_t);
564 static void iwx_nic_error(struct iwx_softc *);
565 static void iwx_dump_driver_status(struct iwx_softc *);
566 static void iwx_nic_umac_error(struct iwx_softc *);
567 static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
568 static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
569 static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
570 struct mbuf *);
571 static void iwx_notif_intr(struct iwx_softc *);
572 #if 0
573 /* XXX-THJ - I don't have hardware for this */
574 static int iwx_intr(void *);
575 #endif
576 static void iwx_intr_msix(void *);
577 static int iwx_preinit(struct iwx_softc *);
578 static void iwx_attach_hook(void *);
579 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
580 static int iwx_probe(device_t);
581 static int iwx_attach(device_t);
582 static int iwx_detach(device_t);
583
584 /* FreeBSD specific glue */
/* FreeBSD specific glue */
/* All-ones Ethernet broadcast address (OpenBSD compat symbol). */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zero "any" address used as a wildcard match. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

/*
 * Debug printf; expands to nothing unless compiled with IWX_DEBUG.
 * Relies on a variable named 'sc' being in scope at every use site.
 * NOTE(review): the test is an exact equality against IWX_DEBUG_ANY; if
 * sc_debug is a bit mask this should probably test individual bits --
 * confirm intended semantics.
 */
#if IWX_DEBUG
#define DPRINTF(x)	do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
#else
#define DPRINTF(x)	do { ; } while (0)
#endif
596
597 /* FreeBSD specific functions */
598 static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
599 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
600 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
601 static void iwx_vap_delete(struct ieee80211vap *);
602 static void iwx_parent(struct ieee80211com *);
603 static void iwx_scan_start(struct ieee80211com *);
604 static void iwx_scan_end(struct ieee80211com *);
605 static void iwx_update_mcast(struct ieee80211com *ic);
606 static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
607 static void iwx_scan_mindwell(struct ieee80211_scan_state *);
608 static void iwx_set_channel(struct ieee80211com *);
609 static void iwx_endscan_cb(void *, int );
610 static int iwx_wme_update(struct ieee80211com *);
611 static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
612 const struct ieee80211_bpf_params *);
613 static int iwx_transmit(struct ieee80211com *, struct mbuf *);
614 static void iwx_start(struct iwx_softc *);
615 static int iwx_ampdu_rx_start(struct ieee80211_node *,
616 struct ieee80211_rx_ampdu *, int, int, int);
617 static void iwx_ampdu_rx_stop(struct ieee80211_node *,
618 struct ieee80211_rx_ampdu *);
619 static int iwx_addba_request(struct ieee80211_node *,
620 struct ieee80211_tx_ampdu *, int, int, int);
621 static int iwx_addba_response(struct ieee80211_node *,
622 struct ieee80211_tx_ampdu *, int, int, int);
623 static void iwx_key_update_begin(struct ieee80211vap *);
624 static void iwx_key_update_end(struct ieee80211vap *);
625 static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
626 ieee80211_keyix *,ieee80211_keyix *);
627 static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
628 static int iwx_key_delete(struct ieee80211vap *,
629 const struct ieee80211_key *);
630 static int iwx_suspend(device_t);
631 static int iwx_resume(device_t);
632 static void iwx_radiotap_attach(struct iwx_softc *);
633
/* OpenBSD compat defines */
/* HT operation: secondary channel offset "none" (20 MHz only). */
#define IEEE80211_HTOP0_SCO_SCN		0
/* VHT operation channel width values: HT-sized (20/40) and 80 MHz. */
#define IEEE80211_VHTOP0_CHAN_WIDTH_HT	0
#define IEEE80211_VHTOP0_CHAN_WIDTH_80	1

/* HT rateset indices for single-stream and two-stream MCS groups. */
#define IEEE80211_HT_RATESET_SISO	0
#define IEEE80211_HT_RATESET_MIMO2	2

/* Standard basic ratesets, rates in 500 kb/s units (OpenBSD compat). */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
650
651 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)652 ieee80211_has_addr4(const struct ieee80211_frame *wh)
653 {
654 return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
655 IEEE80211_FC1_DIR_DSTODS;
656 }
657
658 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)659 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
660 {
661 const struct iwx_fw_cmd_version *entry;
662 int i;
663
664 for (i = 0; i < sc->n_cmd_versions; i++) {
665 entry = &sc->cmd_versions[i];
666 if (entry->group == grp && entry->cmd == cmd)
667 return entry->cmd_ver;
668 }
669
670 return IWX_FW_CMD_VER_UNKNOWN;
671 }
672
673 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)674 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
675 {
676 const struct iwx_fw_cmd_version *entry;
677 int i;
678
679 for (i = 0; i < sc->n_cmd_versions; i++) {
680 entry = &sc->cmd_versions[i];
681 if (entry->group == grp && entry->cmd == cmd)
682 return entry->notif_ver;
683 }
684
685 return IWX_FW_CMD_VER_UNKNOWN;
686 }
687
688 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)689 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
690 {
691 const struct iwx_fw_cscheme_list *l = (const void *)data;
692
693 if (dlen < sizeof(*l) ||
694 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
695 return EINVAL;
696
697 /* we don't actually store anything for now, always use s/w crypto */
698
699 return 0;
700 }
701
702 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)703 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
704 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
705 {
706 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
707 if (err) {
708 printf("%s: could not allocate context info DMA memory\n",
709 DEVNAME(sc));
710 return err;
711 }
712
713 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
714
715 return 0;
716 }
717
718 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)719 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
720 {
721 struct iwx_self_init_dram *dram = &sc->init_dram;
722 int i;
723
724 if (!dram->paging)
725 return;
726
727 /* free paging*/
728 for (i = 0; i < dram->paging_cnt; i++)
729 iwx_dma_contig_free(&dram->paging[i]);
730
731 free(dram->paging, M_DEVBUF);
732 dram->paging_cnt = 0;
733 dram->paging = NULL;
734 }
735
736 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)737 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
738 {
739 int i = 0;
740
741 while (start < fws->fw_count &&
742 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
743 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
744 start++;
745 i++;
746 }
747
748 return i;
749 }
750
/*
 * Load all firmware sections into DMA memory and record their physical
 * addresses in the context-info DRAM table handed to the device.
 *
 * The section list in 'fws' is laid out as:
 *   [lmac sections] <separator> [umac sections] <separator> [paging]
 * lmac/umac sections go into dram->fw; paging sections are kept in
 * dram->paging because their lifetime differs (see comment below).
 *
 * Called with the softc lock held; the lock is dropped around the
 * tracking-array allocations so they may sleep.  Returns 0 or an
 * errno.  On failure, already-allocated section memory is left for the
 * caller to release via iwx_ctxt_info_free_fw_img() (and, for paging,
 * iwx_ctxt_info_free_paging()).
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the lock so the allocations below may sleep. */
	IWX_UNLOCK(sc);
	/*
	 * NOTE(review): this allocation uses M_NOWAIT while the paging
	 * allocation below uses M_WAITOK, even though both run with the
	 * lock dropped -- confirm whether M_NOWAIT is really required here.
	 */
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		/* NOTE(review): unreachable with M_WAITOK on FreeBSD. */
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		/* Device expects little-endian physical addresses. */
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
848
/*
 * Format a firmware version string into 'buf'.  Firmware images with
 * major version 35 or later print the minor version as an 8-digit
 * hexadecimal number (matching the Linux driver's convention); older
 * images use plain decimal throughout.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	const char *fmt = (major >= 35) ? "%u.%08x.%u" : "%u.%u.%u";

	snprintf(buf, bufsize, fmt, major, minor, api);
}
#if 0
/*
 * Firmware monitor (debug buffer) allocation helpers, currently
 * compiled out.  Kept for parity with the OpenBSD driver this code
 * was ported from.
 */
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	/* Already allocated? */
	if (fw_mon->size)
		return 0;

	/* Try successively smaller power-of-two sizes until one fits. */
	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	/*
	 * NOTE(review): if max_power < min_power the loop body never runs
	 * and 'err' is read uninitialized here -- confirm callers always
	 * pass max_power >= min_power before enabling this code.
	 */
	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

/*
 * Translate the TLV-supplied size exponent into an absolute power of
 * two and allocate the firmware monitor buffer (compiled out).
 */
static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif
925
/*
 * Apply the firmware debug destination TLV (dbg_dest_tlv_v1) to the
 * hardware by replaying its recorded register operations.  The real
 * implementation is currently compiled out; the active stub simply
 * reports success.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/*
	 * Replay the register-programming ops recorded in the TLV.
	 * NOTE(review): the PRPH_SETBIT/PRPH_CLEARBIT error paths return
	 * without calling iwx_nic_unlock() -- verify before enabling.
	 */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	/*
	 * NOTE(review): base_reg was already byte-swapped above, so the
	 * le32toh() on it here swaps twice -- verify before enabling.
	 */
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1012
1013 static void
iwx_set_ltr(struct iwx_softc * sc)1014 iwx_set_ltr(struct iwx_softc *sc)
1015 {
1016 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1017 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1018 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1019 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1020 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1021 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1022 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1023 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1024 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1025 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1026 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1027
1028 /*
1029 * To workaround hardware latency issues during the boot process,
1030 * initialize the LTR to ~250 usec (see ltr_val above).
1031 * The firmware initializes this again later (to a smaller value).
1032 */
1033 if (!sc->sc_integrated) {
1034 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1035 } else if (sc->sc_integrated &&
1036 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1037 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1038 IWX_HPM_MAC_LRT_ENABLE_ALL);
1039 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1040 }
1041 }
1042
/*
 * Build the (pre-AX210) context info structure in DMA memory and kick
 * the firmware self-load: fill in the RX/TX ring addresses, load all
 * ucode sections into DMA buffers, hand the device the context-info
 * base address and start the init sequence.
 *
 * Returns 0 on success or an errno; on failure any firmware-section
 * DMA memory already allocated is released.  On success the context
 * info is released later, upon "alive" or failure to get one.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* The RB circular-buffer size must fit in a 4-bit device field. */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1123
/*
 * Build the AX210+ ("gen3") context info: a prph_scratch control
 * block, the context info structure proper, and the image loader
 * (IML) buffer, then point the device at them and kick the firmware
 * self-load.
 *
 * Returns 0 on success or an errno; on failure the IML DMA buffer and
 * any firmware-section DMA memory already allocated are released.
 * On success the context info is released later, upon "alive" or
 * failure to get one.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* gen3 devices cannot boot without an image loader. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill in the context info itself: ring base addresses and the
	 * head/tail index arrays carved out of the prph_info page.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* Hand the device the context info and IML addresses. */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1228
1229 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1230 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1231 {
1232 struct iwx_self_init_dram *dram = &sc->init_dram;
1233 int i;
1234
1235 if (!dram->fw)
1236 return;
1237
1238 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1239 iwx_dma_contig_free(&dram->fw[i]);
1240
1241 free(dram->fw, M_DEVBUF);
1242 dram->lmac_cnt = 0;
1243 dram->umac_cnt = 0;
1244 dram->fw = NULL;
1245 }
1246
1247 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1248 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1249 const uint8_t *data, size_t dlen)
1250 {
1251 struct iwx_fw_sects *fws;
1252 struct iwx_fw_onesect *fwone;
1253
1254 if (type >= IWX_UCODE_TYPE_MAX)
1255 return EINVAL;
1256 if (dlen < sizeof(uint32_t))
1257 return EINVAL;
1258
1259 fws = &sc->sc_fw.fw_sects[type];
1260 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1261 "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1262 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1263 return EINVAL;
1264
1265 fwone = &fws->fw_sect[fws->fw_count];
1266
1267 /* first 32bit are device load offset */
1268 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1269
1270 /* rest is data */
1271 fwone->fws_data = data + sizeof(uint32_t);
1272 fwone->fws_len = dlen - sizeof(uint32_t);
1273
1274 fws->fw_count++;
1275 fws->fw_totlen += fwone->fws_len;
1276
1277 return 0;
1278 }
1279
/* Scan-channel count assumed when the firmware does not advertise one. */
#define IWX_DEFAULT_SCAN_CHANNELS 40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */

/* Payload layout of the IWX_UCODE_TLV_DEF_CALIB firmware TLV. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1288
1289 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1290 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1291 {
1292 const struct iwx_tlv_calib_data *def_calib = data;
1293 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1294
1295 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1296 return EINVAL;
1297
1298 sc->sc_default_calib[ucode_type].flow_trigger =
1299 def_calib->calib.flow_trigger;
1300 sc->sc_default_calib[ucode_type].event_trigger =
1301 def_calib->calib.event_trigger;
1302
1303 return 0;
1304 }
1305
1306 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1307 iwx_fw_info_free(struct iwx_fw_info *fw)
1308 {
1309 free(fw->fw_rawdata, M_DEVBUF);
1310 fw->fw_rawdata = NULL;
1311 fw->fw_rawsize = 0;
1312 /* don't touch fw->fw_status */
1313 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1314 free(fw->iml, M_DEVBUF);
1315 fw->iml = NULL;
1316 fw->iml_len = 0;
1317 }
1318
/*
 * High address bits set by firmware on debug event-table pointers;
 * stripped (via ~IWX_FW_ADDR_CACHE_CONTROL) before the addresses are
 * stored in the TLV parser below.
 */
#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1320
1321 static int
iwx_read_firmware(struct iwx_softc * sc)1322 iwx_read_firmware(struct iwx_softc *sc)
1323 {
1324 struct iwx_fw_info *fw = &sc->sc_fw;
1325 const struct iwx_tlv_ucode_header *uhdr;
1326 struct iwx_ucode_tlv tlv;
1327 uint32_t tlv_type;
1328 const uint8_t *data;
1329 int err = 0;
1330 size_t len;
1331 const struct firmware *fwp;
1332
1333 if (fw->fw_status == IWX_FW_STATUS_DONE)
1334 return 0;
1335
1336 fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1337 fwp = firmware_get(sc->sc_fwname);
1338 sc->sc_fwp = fwp;
1339
1340 if (fwp == NULL) {
1341 printf("%s: could not read firmware %s\n",
1342 DEVNAME(sc), sc->sc_fwname);
1343 err = ENOENT;
1344 goto out;
1345 }
1346
1347 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
1348 __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);
1349
1350
1351 sc->sc_capaflags = 0;
1352 sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1353 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1354 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1355 sc->n_cmd_versions = 0;
1356
1357 uhdr = (const void *)(fwp->data);
1358 if (*(const uint32_t *)fwp->data != 0
1359 || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1360 printf("%s: invalid firmware %s\n",
1361 DEVNAME(sc), sc->sc_fwname);
1362 err = EINVAL;
1363 goto out;
1364 }
1365
1366 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1367 IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1368 IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1369 IWX_UCODE_API(le32toh(uhdr->ver)));
1370
1371 data = uhdr->data;
1372 len = fwp->datasize - sizeof(*uhdr);
1373
1374 while (len >= sizeof(tlv)) {
1375 size_t tlv_len;
1376 const void *tlv_data;
1377
1378 memcpy(&tlv, data, sizeof(tlv));
1379 tlv_len = le32toh(tlv.length);
1380 tlv_type = le32toh(tlv.type);
1381
1382 len -= sizeof(tlv);
1383 data += sizeof(tlv);
1384 tlv_data = data;
1385
1386 if (len < tlv_len) {
1387 printf("%s: firmware too short: %zu bytes\n",
1388 DEVNAME(sc), len);
1389 err = EINVAL;
1390 goto parse_out;
1391 }
1392
1393 switch (tlv_type) {
1394 case IWX_UCODE_TLV_PROBE_MAX_LEN:
1395 if (tlv_len < sizeof(uint32_t)) {
1396 err = EINVAL;
1397 goto parse_out;
1398 }
1399 sc->sc_capa_max_probe_len
1400 = le32toh(*(const uint32_t *)tlv_data);
1401 if (sc->sc_capa_max_probe_len >
1402 IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1403 err = EINVAL;
1404 goto parse_out;
1405 }
1406 break;
1407 case IWX_UCODE_TLV_PAN:
1408 if (tlv_len) {
1409 err = EINVAL;
1410 goto parse_out;
1411 }
1412 sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1413 break;
1414 case IWX_UCODE_TLV_FLAGS:
1415 if (tlv_len < sizeof(uint32_t)) {
1416 err = EINVAL;
1417 goto parse_out;
1418 }
1419 /*
1420 * Apparently there can be many flags, but Linux driver
1421 * parses only the first one, and so do we.
1422 *
1423 * XXX: why does this override IWX_UCODE_TLV_PAN?
1424 * Intentional or a bug? Observations from
1425 * current firmware file:
1426 * 1) TLV_PAN is parsed first
1427 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
1428 * ==> this resets TLV_PAN to itself... hnnnk
1429 */
1430 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
1431 break;
1432 case IWX_UCODE_TLV_CSCHEME:
1433 err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1434 if (err)
1435 goto parse_out;
1436 break;
1437 case IWX_UCODE_TLV_NUM_OF_CPU: {
1438 uint32_t num_cpu;
1439 if (tlv_len != sizeof(uint32_t)) {
1440 err = EINVAL;
1441 goto parse_out;
1442 }
1443 num_cpu = le32toh(*(const uint32_t *)tlv_data);
1444 if (num_cpu < 1 || num_cpu > 2) {
1445 err = EINVAL;
1446 goto parse_out;
1447 }
1448 break;
1449 }
1450 case IWX_UCODE_TLV_SEC_RT:
1451 err = iwx_firmware_store_section(sc,
1452 IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1453 if (err)
1454 goto parse_out;
1455 break;
1456 case IWX_UCODE_TLV_SEC_INIT:
1457 err = iwx_firmware_store_section(sc,
1458 IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1459 if (err)
1460 goto parse_out;
1461 break;
1462 case IWX_UCODE_TLV_SEC_WOWLAN:
1463 err = iwx_firmware_store_section(sc,
1464 IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1465 if (err)
1466 goto parse_out;
1467 break;
1468 case IWX_UCODE_TLV_DEF_CALIB:
1469 if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1470 err = EINVAL;
1471 goto parse_out;
1472 }
1473 err = iwx_set_default_calib(sc, tlv_data);
1474 if (err)
1475 goto parse_out;
1476 break;
1477 case IWX_UCODE_TLV_PHY_SKU:
1478 if (tlv_len != sizeof(uint32_t)) {
1479 err = EINVAL;
1480 goto parse_out;
1481 }
1482 sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
1483 break;
1484
1485 case IWX_UCODE_TLV_API_CHANGES_SET: {
1486 const struct iwx_ucode_api *api;
1487 int idx, i;
1488 if (tlv_len != sizeof(*api)) {
1489 err = EINVAL;
1490 goto parse_out;
1491 }
1492 api = (const struct iwx_ucode_api *)tlv_data;
1493 idx = le32toh(api->api_index);
1494 if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1495 err = EINVAL;
1496 goto parse_out;
1497 }
1498 for (i = 0; i < 32; i++) {
1499 if ((le32toh(api->api_flags) & (1 << i)) == 0)
1500 continue;
1501 setbit(sc->sc_ucode_api, i + (32 * idx));
1502 }
1503 break;
1504 }
1505
1506 case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1507 const struct iwx_ucode_capa *capa;
1508 int idx, i;
1509 if (tlv_len != sizeof(*capa)) {
1510 err = EINVAL;
1511 goto parse_out;
1512 }
1513 capa = (const struct iwx_ucode_capa *)tlv_data;
1514 idx = le32toh(capa->api_index);
1515 if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1516 goto parse_out;
1517 }
1518 for (i = 0; i < 32; i++) {
1519 if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1520 continue;
1521 setbit(sc->sc_enabled_capa, i + (32 * idx));
1522 }
1523 break;
1524 }
1525
1526 case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1527 case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1528 /* ignore, not used by current driver */
1529 break;
1530
1531 case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1532 err = iwx_firmware_store_section(sc,
1533 IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1534 tlv_len);
1535 if (err)
1536 goto parse_out;
1537 break;
1538
1539 case IWX_UCODE_TLV_PAGING:
1540 if (tlv_len != sizeof(uint32_t)) {
1541 err = EINVAL;
1542 goto parse_out;
1543 }
1544 break;
1545
1546 case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1547 if (tlv_len != sizeof(uint32_t)) {
1548 err = EINVAL;
1549 goto parse_out;
1550 }
1551 sc->sc_capa_n_scan_channels =
1552 le32toh(*(const uint32_t *)tlv_data);
1553 if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1554 err = ERANGE;
1555 goto parse_out;
1556 }
1557 break;
1558
1559 case IWX_UCODE_TLV_FW_VERSION:
1560 if (tlv_len != sizeof(uint32_t) * 3) {
1561 err = EINVAL;
1562 goto parse_out;
1563 }
1564
1565 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1566 le32toh(((const uint32_t *)tlv_data)[0]),
1567 le32toh(((const uint32_t *)tlv_data)[1]),
1568 le32toh(((const uint32_t *)tlv_data)[2]));
1569 break;
1570
1571 case IWX_UCODE_TLV_FW_DBG_DEST: {
1572 const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1573
1574 fw->dbg_dest_ver = (const uint8_t *)tlv_data;
1575 if (*fw->dbg_dest_ver != 0) {
1576 err = EINVAL;
1577 goto parse_out;
1578 }
1579
1580 if (fw->dbg_dest_tlv_init)
1581 break;
1582 fw->dbg_dest_tlv_init = true;
1583
1584 dest_v1 = (const void *)tlv_data;
1585 fw->dbg_dest_tlv_v1 = dest_v1;
1586 fw->n_dest_reg = tlv_len -
1587 offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1588 fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1589 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1590 "%s: found debug dest; n_dest_reg=%d\n",
1591 __func__, fw->n_dest_reg);
1592 break;
1593 }
1594
1595 case IWX_UCODE_TLV_FW_DBG_CONF: {
1596 const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;
1597
1598 if (!fw->dbg_dest_tlv_init ||
1599 conf->id >= nitems(fw->dbg_conf_tlv) ||
1600 fw->dbg_conf_tlv[conf->id] != NULL)
1601 break;
1602
1603 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1604 "Found debug configuration: %d\n", conf->id);
1605 fw->dbg_conf_tlv[conf->id] = conf;
1606 fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1607 break;
1608 }
1609
1610 case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1611 const struct iwx_umac_debug_addrs *dbg_ptrs =
1612 (const void *)tlv_data;
1613
1614 if (tlv_len != sizeof(*dbg_ptrs)) {
1615 err = EINVAL;
1616 goto parse_out;
1617 }
1618 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1619 break;
1620 sc->sc_uc.uc_umac_error_event_table =
1621 le32toh(dbg_ptrs->error_info_addr) &
1622 ~IWX_FW_ADDR_CACHE_CONTROL;
1623 sc->sc_uc.error_event_table_tlv_status |=
1624 IWX_ERROR_EVENT_TABLE_UMAC;
1625 break;
1626 }
1627
1628 case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1629 const struct iwx_lmac_debug_addrs *dbg_ptrs =
1630 (const void *)tlv_data;
1631
1632 if (tlv_len != sizeof(*dbg_ptrs)) {
1633 err = EINVAL;
1634 goto parse_out;
1635 }
1636 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1637 break;
1638 sc->sc_uc.uc_lmac_error_event_table[0] =
1639 le32toh(dbg_ptrs->error_event_table_ptr) &
1640 ~IWX_FW_ADDR_CACHE_CONTROL;
1641 sc->sc_uc.error_event_table_tlv_status |=
1642 IWX_ERROR_EVENT_TABLE_LMAC1;
1643 break;
1644 }
1645
1646 case IWX_UCODE_TLV_FW_MEM_SEG:
1647 break;
1648
1649 case IWX_UCODE_TLV_IML:
1650 if (sc->sc_fw.iml != NULL) {
1651 free(fw->iml, M_DEVBUF);
1652 fw->iml_len = 0;
1653 }
1654 sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
1655 M_WAITOK | M_ZERO);
1656 if (sc->sc_fw.iml == NULL) {
1657 err = ENOMEM;
1658 goto parse_out;
1659 }
1660 memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
1661 sc->sc_fw.iml_len = tlv_len;
1662 break;
1663
1664 case IWX_UCODE_TLV_CMD_VERSIONS:
1665 if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1666 tlv_len /= sizeof(struct iwx_fw_cmd_version);
1667 tlv_len *= sizeof(struct iwx_fw_cmd_version);
1668 }
1669 if (sc->n_cmd_versions != 0) {
1670 err = EINVAL;
1671 goto parse_out;
1672 }
1673 if (tlv_len > sizeof(sc->cmd_versions)) {
1674 err = EINVAL;
1675 goto parse_out;
1676 }
1677 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1678 sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1679 break;
1680
1681 case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1682 break;
1683
1684 case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1685 case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1686 case IWX_UCODE_TLV_FW_NUM_STATIONS:
1687 case IWX_UCODE_TLV_FW_NUM_BEACONS:
1688 break;
1689
1690 /* undocumented TLVs found in iwx-cc-a0-46 image */
1691 case 58:
1692 case 0x1000003:
1693 case 0x1000004:
1694 break;
1695
1696 /* undocumented TLVs found in iwx-cc-a0-48 image */
1697 case 0x1000000:
1698 case 0x1000002:
1699 break;
1700
1701 case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1702 case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1703 case IWX_UCODE_TLV_TYPE_HCMD:
1704 case IWX_UCODE_TLV_TYPE_REGIONS:
1705 case IWX_UCODE_TLV_TYPE_TRIGGERS:
1706 case IWX_UCODE_TLV_TYPE_CONF_SET:
1707 case IWX_UCODE_TLV_SEC_TABLE_ADDR:
1708 case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
1709 case IWX_UCODE_TLV_CURRENT_PC:
1710 break;
1711
1712 /* undocumented TLV found in iwx-cc-a0-67 image */
1713 case 0x100000b:
1714 break;
1715
1716 /* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1717 case 0x101:
1718 break;
1719
1720 /* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1721 case 0x100000c:
1722 break;
1723
1724 /* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
1725 case 69:
1726 break;
1727
1728 default:
1729 err = EINVAL;
1730 goto parse_out;
1731 }
1732
1733 /*
1734 * Check for size_t overflow and ignore missing padding at
1735 * end of firmware file.
1736 */
1737 if (roundup(tlv_len, 4) > len)
1738 break;
1739
1740 len -= roundup(tlv_len, 4);
1741 data += roundup(tlv_len, 4);
1742 }
1743
1744 KASSERT(err == 0, ("unhandled fw parse error"));
1745
1746 parse_out:
1747 if (err) {
1748 printf("%s: firmware parse error %d, "
1749 "section type %d\n", DEVNAME(sc), err, tlv_type);
1750 }
1751
1752 out:
1753 if (err) {
1754 fw->fw_status = IWX_FW_STATUS_NONE;
1755 if (fw->fw_rawdata != NULL)
1756 iwx_fw_info_free(fw);
1757 } else
1758 fw->fw_status = IWX_FW_STATUS_DONE;
1759 return err;
1760 }
1761
1762 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1763 iwx_prph_addr_mask(struct iwx_softc *sc)
1764 {
1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1766 return 0x00ffffff;
1767 else
1768 return 0x000fffff;
1769 }
1770
/*
 * Read a periphery register via the indirect HBUS interface, without
 * asserting the NIC-access lock.  The target address is masked to the
 * device's PRPH address width and ORed with control bits (3 << 24)
 * before being written to the indirect-read address register.
 */
static uint32_t
iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}
1779
/*
 * Read a periphery register.  Caller must hold the NIC-access lock
 * (see iwx_nic_lock()); this is enforced by the assertion.
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}
1786
/*
 * Write a periphery register via the indirect HBUS interface, without
 * asserting the NIC-access lock.  Mirrors iwx_read_prph_unlocked():
 * address (masked, with control bits 3 << 24) goes to the write-address
 * register, then the value to the write-data register.
 */
static void
iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}
1795
/*
 * Write a periphery register.  Caller must hold the NIC-access lock
 * (see iwx_nic_lock()); this is enforced by the assertion.
 */
static void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}
1802
/*
 * Read a UMAC periphery register: same as iwx_read_prph() but with
 * the device-specific UMAC PRPH base offset applied.
 */
static uint32_t
iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
{
	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
}
1808
/*
 * Write a UMAC periphery register: same as iwx_write_prph() but with
 * the device-specific UMAC PRPH base offset applied.
 */
static void
iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
}
1814
/*
 * Read "dwords" 32-bit words of device memory starting at "addr" into
 * "buf" using the indirect HBUS memory-access registers.
 * Returns 0 on success or EBUSY if the NIC lock could not be taken.
 */
static int
iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
		/*
		 * Repeated reads of the data register return successive
		 * words (the device advances the address internally).
		 */
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
		iwx_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}
1831
1832 static int
iwx_poll_bit(struct iwx_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)1833 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1834 int timo)
1835 {
1836 for (;;) {
1837 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1838 return 1;
1839 }
1840 if (timo < 10) {
1841 return 0;
1842 }
1843 timo -= 10;
1844 DELAY(10);
1845 }
1846 }
1847
/*
 * Request MAC access so that registers behind the power domain can be
 * used.  Requests nest: sc_nic_locks counts outstanding holders and
 * the hardware handshake is performed only for the first one.
 * Returns 1 if access was granted, 0 on failure (in which case the
 * counter is left unchanged).
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	/* Poll (up to 150ms) for MAC clock ready / not going to sleep. */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	    | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1873
1874 static void
iwx_nic_assert_locked(struct iwx_softc * sc)1875 iwx_nic_assert_locked(struct iwx_softc *sc)
1876 {
1877 if (sc->sc_nic_locks <= 0)
1878 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1879 }
1880
/*
 * Drop one reference on the NIC-access lock.  When the last holder
 * releases it, clear the MAC access request bit so the device may
 * power down again.  Warns (but does not panic) on underflow.
 */
static void
iwx_nic_unlock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}
1891
1892 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1894 uint32_t mask)
1895 {
1896 uint32_t val;
1897
1898 if (iwx_nic_lock(sc)) {
1899 val = iwx_read_prph(sc, reg) & mask;
1900 val |= bits;
1901 iwx_write_prph(sc, reg, val);
1902 iwx_nic_unlock(sc);
1903 return 0;
1904 }
1905 return EBUSY;
1906 }
1907
1908 static int
iwx_set_bits_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits)1909 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1910 {
1911 return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1912 }
1913
1914 static int
iwx_clear_bits_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits)1915 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1916 {
1917 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1918 }
1919
1920 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1922 {
1923 if (error != 0)
1924 return;
1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1926 *(bus_addr_t *)arg = segs[0].ds_addr;
1927 }
1928
/*
 * Allocate a physically contiguous, coherent, zeroed DMA buffer of
 * "size" bytes with the given alignment and load it into bus space.
 * On success dma->vaddr and dma->paddr are valid.  On failure all
 * partially created resources are released and an errno is returned.
 */
static int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single segment, restricted to 32-bit DMA addresses. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* iwx_dma_map_addr() stores the one segment into dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Free memory now; the tag is torn down in _free(). */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwx_dma_contig_free(dma);
	return error;
}
1967
/*
 * Release everything iwx_dma_contig_alloc() created.  Safe to call on
 * a partially initialized iwx_dma_info: NULL fields are skipped and
 * cleared fields prevent double-free on repeated calls.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1983
1984 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1985 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986 {
1987 bus_size_t size;
1988 int i, err;
1989
1990 ring->cur = 0;
1991
1992 /* Allocate RX descriptors (256-byte aligned). */
1993 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1994 size = sizeof(struct iwx_rx_transfer_desc);
1995 else
1996 size = sizeof(uint64_t);
1997 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1998 size * IWX_RX_MQ_RING_COUNT, 256);
1999 if (err) {
2000 device_printf(sc->sc_dev,
2001 "could not allocate RX ring DMA memory\n");
2002 goto fail;
2003 }
2004 ring->desc = ring->free_desc_dma.vaddr;
2005
2006 /* Allocate RX status area (16-byte aligned). */
2007 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2008 size = sizeof(uint16_t);
2009 else
2010 size = sizeof(*ring->stat);
2011 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2012 if (err) {
2013 device_printf(sc->sc_dev,
2014 "could not allocate RX status DMA memory\n");
2015 goto fail;
2016 }
2017 ring->stat = ring->stat_dma.vaddr;
2018
2019 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2020 size = sizeof(struct iwx_rx_completion_desc);
2021 else
2022 size = sizeof(uint32_t);
2023 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2024 size * IWX_RX_MQ_RING_COUNT, 256);
2025 if (err) {
2026 device_printf(sc->sc_dev,
2027 "could not allocate RX ring DMA memory\n");
2028 goto fail;
2029 }
2030
2031 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2032 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2033 0, NULL, NULL, &ring->data_dmat);
2034
2035 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2036 struct iwx_rx_data *data = &ring->data[i];
2037
2038 memset(data, 0, sizeof(*data));
2039 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2040 if (err) {
2041 device_printf(sc->sc_dev,
2042 "could not create RX buf DMA map\n");
2043 goto fail;
2044 }
2045
2046 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2047 if (err)
2048 goto fail;
2049 }
2050 return 0;
2051
2052 fail: iwx_free_rx_ring(sc, ring);
2053 return err;
2054 }
2055
/*
 * Disable the RX DMA engine (RFH) and poll, for up to ~10ms, until it
 * reports idle.  Uses the GEN3 register set on AX210+ devices and the
 * legacy register set otherwise.  Silently does nothing if the NIC
 * lock cannot be taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* 1000 polls x 10us = up to 10ms for DMA to go idle. */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
2081
2082 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2083 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2084 {
2085 ring->cur = 0;
2086 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2087 BUS_DMASYNC_PREWRITE);
2088 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2089 uint16_t *status = sc->rxq.stat_dma.vaddr;
2090 *status = 0;
2091 } else
2092 memset(ring->stat, 0, sizeof(*ring->stat));
2093 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2094 BUS_DMASYNC_POSTWRITE);
2095
2096 }
2097
/*
 * Free all RX ring resources: the three DMA areas, every per-slot
 * mbuf and DMA map, and finally the per-slot DMA tag.  Safe to call
 * on a partially initialized ring (NULL fields are skipped).
 */
static void
iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	int i;

	iwx_dma_contig_free(&ring->free_desc_dma);
	iwx_dma_contig_free(&ring->stat_dma);
	iwx_dma_contig_free(&ring->used_desc_dma);

	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];
		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
2126
2127 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2128 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2129 {
2130 bus_addr_t paddr;
2131 bus_size_t size;
2132 int i, err;
2133 size_t bc_tbl_size;
2134 bus_size_t bc_align;
2135 size_t mapsize;
2136
2137 ring->qid = qid;
2138 ring->queued = 0;
2139 ring->cur = 0;
2140 ring->cur_hw = 0;
2141 ring->tail = 0;
2142 ring->tail_hw = 0;
2143
2144 /* Allocate TX descriptors (256-byte aligned). */
2145 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2146 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2147 if (err) {
2148 device_printf(sc->sc_dev,
2149 "could not allocate TX ring DMA memory\n");
2150 goto fail;
2151 }
2152 ring->desc = ring->desc_dma.vaddr;
2153
2154 /*
2155 * The hardware supports up to 512 Tx rings which is more
2156 * than we currently need.
2157 *
2158 * In DQA mode we use 1 command queue + 1 default queue for
2159 * management, control, and non-QoS data frames.
2160 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2161 *
2162 * Tx aggregation requires additional queues, one queue per TID for
2163 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2164 * Firmware may assign its own internal IDs for these queues
2165 * depending on which TID gets aggregation enabled first.
2166 * The driver maintains a table mapping driver-side queue IDs
2167 * to firmware-side queue IDs.
2168 */
2169
2170 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2171 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2172 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2173 bc_align = 128;
2174 } else {
2175 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2176 bc_align = 64;
2177 }
2178 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2179 bc_align);
2180 if (err) {
2181 device_printf(sc->sc_dev,
2182 "could not allocate byte count table DMA memory\n");
2183 goto fail;
2184 }
2185
2186 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2187 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2188 IWX_FIRST_TB_SIZE_ALIGN);
2189 if (err) {
2190 device_printf(sc->sc_dev,
2191 "could not allocate cmd DMA memory\n");
2192 goto fail;
2193 }
2194 ring->cmd = ring->cmd_dma.vaddr;
2195
2196 /* FW commands may require more mapped space than packets. */
2197 if (qid == IWX_DQA_CMD_QUEUE)
2198 mapsize = (sizeof(struct iwx_cmd_header) +
2199 IWX_MAX_CMD_PAYLOAD_SIZE);
2200 else
2201 mapsize = MCLBYTES;
2202 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2203 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2204 mapsize, 0, NULL, NULL, &ring->data_dmat);
2205
2206 paddr = ring->cmd_dma.paddr;
2207 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2208 struct iwx_tx_data *data = &ring->data[i];
2209
2210 data->cmd_paddr = paddr;
2211 paddr += sizeof(struct iwx_device_cmd);
2212
2213 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2214 if (err) {
2215 device_printf(sc->sc_dev,
2216 "could not create TX buf DMA map\n");
2217 goto fail;
2218 }
2219 }
2220 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2221 return 0;
2222
2223 fail:
2224 return err;
2225 }
2226
/*
 * Return a TX ring to its empty post-alloc state: free all queued
 * mbufs, zero the byte-count table and descriptors, clear the queue
 * from the full/enabled masks and any aggregation-queue mapping, and
 * reset all software indices.  DMA memory itself is kept.
 */
static void
iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	/* Clear byte count table. */
	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);

	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	sc->qenablemsk &= ~(1 << ring->qid);
	/* Drop any TID -> aggregation-queue mapping pointing at this ring. */
	for (i = 0; i < nitems(sc->aggqid); i++) {
		if (sc->aggqid[i] == ring->qid) {
			sc->aggqid[i] = 0;
			break;
		}
	}
	ring->queued = 0;
	ring->cur = 0;
	ring->cur_hw = 0;
	ring->tail = 0;
	ring->tail_hw = 0;
	ring->tid = 0;
}
2266
/*
 * Free all TX ring resources: the descriptor/command/byte-count DMA
 * areas, every queued mbuf and per-slot DMA map, and the per-slot DMA
 * tag.  Safe to call on a partially initialized ring.
 */
static void
iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
	int i;

	iwx_dma_contig_free(&ring->desc_dma);
	iwx_dma_contig_free(&ring->cmd_dma);
	iwx_dma_contig_free(&ring->bc_tbl);

	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
2296
/*
 * Arm only the RF-kill interrupt (legacy or MSI-X flavor) so a switch
 * toggle can still be detected while everything else is masked, and
 * let RF-kill wake the PCIe link from L1 (per the RFKILL_WAKE_L1A_EN
 * bit).
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* MSI-X: a cause is enabled when its mask bit is clear. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2314
2315 static int
iwx_check_rfkill(struct iwx_softc * sc)2316 iwx_check_rfkill(struct iwx_softc *sc)
2317 {
2318 uint32_t v;
2319 int rv;
2320
2321 /*
2322 * "documentation" is not really helpful here:
2323 * 27: HW_RF_KILL_SW
2324 * Indicates state of (platform's) hardware RF-Kill switch
2325 *
2326 * But apparently when it's off, it's on ...
2327 */
2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2330 if (rv) {
2331 sc->sc_flags |= IWX_FLAG_RFKILL;
2332 } else {
2333 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2334 }
2335
2336 return rv;
2337 }
2338
/*
 * Unmask the full default interrupt set, for legacy (single mask
 * register) or MSI-X (separate FH and HW cause masks) mode.
 */
static void
iwx_enable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
2358
/*
 * During firmware load, enable only what is needed to see the ALIVE
 * notification: the ALIVE cause (plus FH RX in legacy mode, or all FH
 * causes in MSI-X mode).
 */
static void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
2378
#if 0
/* Currently unused: re-arm the legacy interrupt mask saved in sc_intmask. */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2386
/*
 * Mask all interrupts.  In legacy mode also acknowledge anything
 * pending; in MSI-X mode restore the initial (all-masked relative to
 * init) cause masks.
 */
static void
iwx_disable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWX_WRITE(sc, IWX_CSR_INT, ~0);
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}
}
2403
/*
 * (Re)initialize the interrupt cause table (ICT): clear the table,
 * point the hardware at its DMA address, switch the driver to ICT
 * mode, and re-enable interrupts.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Ack anything pending before unmasking again. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2425
#define IWX_HW_READY_TIMEOUT 50
/*
 * Tell the device the driver is ready and poll (up to
 * IWX_HW_READY_TIMEOUT microseconds) for the NIC_READY
 * acknowledgement; on success also report "OS alive" through the
 * mailbox register.  Returns nonzero when the device is ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2447
/*
 * Bring the card to the state where iwx_set_hw_ready() succeeds,
 * repeatedly issuing the PREPARE handshake.  Returns 0 when ready,
 * ETIMEDOUT otherwise.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		/*
		 * NOTE(review): "t" is deliberately not reset per outer
		 * iteration, so after the first 150ms budget is spent the
		 * inner loop polls only once per PREPARE attempt.  This
		 * appears intentional (inherited behavior) — confirm
		 * against the upstream driver before changing.
		 */
		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2477
/*
 * Toggle the HPM power-gating configuration: briefly force the CR
 * domain active, enable power/sleep gating, then release the force
 * bit.  Returns 0 on success or an errno from the PRPH accessors.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2498
/*
 * Configure power-management related state from PCIe config space:
 * disable ASPM L0s in the device, then record in the softc whether
 * L0s is disabled on the link (sc_pm_support) and whether LTR is
 * enabled (sc_ltr_enabled).
 */
static void
iwx_apm_config(struct iwx_softc *sc)
{
	uint16_t lctl, cap;
	int pcie_ptr;
	int error;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);

	error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
	if (error != 0) {
		printf("can't fill pcie_ptr\n");
		return;
	}

	lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
	    sizeof(lctl));
#define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
#define PCI_PCIE_DCSR2 0x28
	/* Device Control/Status 2: check the LTR-enable bit. */
	cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
	    sizeof(lctl));
#define PCI_PCIE_DCSR2_LTREN 0x00000400
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
#define PCI_PCIE_LCSR_ASPM_L1 0x00000002
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
#undef PCI_PCIE_LCSR_ASPM_L0S
#undef PCI_PCIE_DCSR2
#undef PCI_PCIE_DCSR2_LTREN
#undef PCI_PCIE_LCSR_ASPM_L1
}
2538
/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success or ETIMEDOUT if the MAC clock never
 * stabilized.
 */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2592
/*
 * Power the adapter down: run the PREPARE/PME handshake, stop
 * busmaster DMA (waiting up to 100us for it to quiesce), and clear
 * INIT_DONE to drop back from D0A to the uninitialized D0U state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for bus master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2621
/*
 * Program the MSI-X hardware configuration and, when MSI-X is in use,
 * snapshot the reset-default cause masks.  The masks are stored
 * inverted: a set bit in sc_*_init_mask means "cause enabled".
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2635
/*
 * Configure the device's interrupt delivery mode.  Without MSI-X,
 * just tell the (MSI-X-by-default) hardware to use MSI.  With MSI-X,
 * route every RX queue and every non-RX cause to vector 0 via the
 * IVAR tables and unmask those causes.  When "stopped" is set the
 * device is not touched through the PRPH interface (no NIC lock is
 * taken).
 */
static void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
		    IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2723
/*
 * Clear the HPM persistence bit if it is set, which is required
 * before device reset on 22000-family hardware.  Fails with EPERM if
 * the WFPM write-protection is engaged.
 */
static int
iwx_clear_persistence_bit(struct iwx_softc *sc)
{
	uint32_t hpm, wprot;

	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
	/*
	 * NOTE(review): 0xa5a5a5a0 looks like an invalid/poisoned
	 * readback value that is skipped deliberately — confirm.
	 */
	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
		if (wprot & IWX_PREG_WFPM_ACCESS) {
			printf("%s: cannot clear persistence bit\n",
			    DEVNAME(sc));
			return EPERM;
		}
		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
		    hpm & ~IWX_PERSISTENCE_BIT);
	}

	return 0;
}
2743
/*
 * Full hardware bring-up sequence: prepare the card, clear the
 * persistence bit (22000 family), software-reset the device (twice on
 * integrated 22000 parts, with a forced power-gating cycle between),
 * run APM init, program MSI-X, and arm the RF-kill interrupt.
 * Returns 0 on success or an errno.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2796
static void
/*
 * Stop the device: disable interrupts, tear down the Rx/Tx rings, drop any
 * held NIC wake locks, stop the APM, software-reset the on-board processor,
 * and restore just enough interrupt state (IVAR table, rfkill) to notice
 * a radio-enable event while stopped.
 */
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	/* Quiesce DMA and reclaim all ring buffers. */
	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging and PNVM DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2857
2858 static void
iwx_nic_config(struct iwx_softc * sc)2859 iwx_nic_config(struct iwx_softc *sc)
2860 {
2861 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2862 uint32_t mask, val, reg_val = 0;
2863
2864 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2865 IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2866 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2867 IWX_FW_PHY_CFG_RADIO_STEP_POS;
2868 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2869 IWX_FW_PHY_CFG_RADIO_DASH_POS;
2870
2871 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2872 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2873 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2874 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2875
2876 /* radio configuration */
2877 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2878 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2879 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2880
2881 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2882 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2883 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2884 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2885 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2886 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2887 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2888
2889 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2890 val &= ~mask;
2891 val |= reg_val;
2892 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2893 }
2894
2895 static int
iwx_nic_rx_init(struct iwx_softc * sc)2896 iwx_nic_rx_init(struct iwx_softc *sc)
2897 {
2898 IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2899
2900 /*
2901 * We don't configure the RFH; the firmware will do that.
2902 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2903 */
2904 return 0;
2905 }
2906
2907 static int
iwx_nic_init(struct iwx_softc * sc)2908 iwx_nic_init(struct iwx_softc *sc)
2909 {
2910 int err;
2911
2912 iwx_apm_init(sc);
2913 if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2914 iwx_nic_config(sc);
2915
2916 err = iwx_nic_rx_init(sc);
2917 if (err)
2918 return err;
2919
2920 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2921
2922 return 0;
2923 }
2924
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,	/* best effort */
	IWX_GEN2_EDCA_TX_FIFO_BK,	/* background */
	IWX_GEN2_EDCA_TX_FIFO_VI,	/* video */
	IWX_GEN2_EDCA_TX_FIFO_VO,	/* voice */
};
2932
static int
/*
 * Ask the firmware to enable a Tx queue for the given station/TID and bind
 * it to our pre-allocated ring sc->txq[qid].  Depending on the firmware's
 * SCD_QUEUE_CONFIG_CMD version this uses either the legacy v0 command or
 * the v3 add/remove command.  We do not support dynamic queue assignment:
 * the queue ID the firmware picks must match the one we requested.
 * Returns 0 on success or an errno value.
 */
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring; firmware expects write pointer 0. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: pass DMA addresses of ring and byte-count table. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3: generic add/remove operation keyed by station mask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must agree with our (reset) ring state. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	/* Queue is live: record it in the enabled mask and bind the TID. */
	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3025
static int
/*
 * Ask the firmware to disable/remove the Tx queue bound to the given
 * station/TID, then reclaim the local ring.  Mirrors iwx_enable_txq():
 * the command layout depends on the firmware's SCD_QUEUE_CONFIG_CMD
 * version.  Returns 0 on success or an errno value.
 */
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: all-zero addresses with the enable flag clear. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3: explicit REMOVE operation keyed by station mask and TID. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Queue is gone: drop it from the enabled mask and recycle the ring. */
	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3084
3085 static void
iwx_post_alive(struct iwx_softc * sc)3086 iwx_post_alive(struct iwx_softc *sc)
3087 {
3088 int txcmd_ver;
3089
3090 iwx_ict_reset(sc);
3091
3092 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3093 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3094 sc->sc_rate_n_flags_version = 2;
3095 else
3096 sc->sc_rate_n_flags_version = 1;
3097
3098 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3099 }
3100
3101 static int
iwx_schedule_session_protection(struct iwx_softc * sc,struct iwx_node * in,uint32_t duration_tu)3102 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
3103 uint32_t duration_tu)
3104 {
3105
3106 struct iwx_session_prot_cmd cmd = {
3107 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3108 in->in_color)),
3109 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
3110 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3111 .duration_tu = htole32(duration_tu),
3112 };
3113 uint32_t cmd_id;
3114 int err;
3115
3116 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3117 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3118 if (!err)
3119 sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3120 return err;
3121 }
3122
3123 static void
iwx_unprotect_session(struct iwx_softc * sc,struct iwx_node * in)3124 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3125 {
3126 struct iwx_session_prot_cmd cmd = {
3127 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3128 in->in_color)),
3129 .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3130 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3131 .duration_tu = 0,
3132 };
3133 uint32_t cmd_id;
3134
3135 /* Do nothing if the time event has already ended. */
3136 if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3137 return;
3138
3139 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3140 if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3141 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3142 }
3143
3144 /*
3145 * NVM read access and content parsing. We do not support
3146 * external NVM or writing NVM.
3147 */
3148
3149 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3150 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3151 {
3152 uint8_t tx_ant;
3153
3154 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3155 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3156
3157 if (sc->sc_nvm.valid_tx_ant)
3158 tx_ant &= sc->sc_nvm.valid_tx_ant;
3159
3160 return tx_ant;
3161 }
3162
3163 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3164 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3165 {
3166 uint8_t rx_ant;
3167
3168 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3169 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3170
3171 if (sc->sc_nvm.valid_rx_ant)
3172 rx_ant &= sc->sc_nvm.valid_rx_ant;
3173
3174 return rx_ant;
3175 }
3176
static void
/*
 * Populate net80211's channel list from the NVM regulatory data that the
 * firmware returned in the NVM_GET_INFO response (v3 uses 16-bit channel
 * flags, v4 uses 32-bit).  2.4GHz channels get 11b/g/n modes; 5GHz
 * channels get 11a/n/ac modes with 40/80/160MHz width flags as permitted.
 * 6GHz is not handled yet.
 */
iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[IEEE80211_MODE_BYTES];
	const uint8_t *nvm_channels;
	uint32_t ch_flags;
	int ch_idx, nchan;

	/* Ultra-high-band parts use a larger channel table. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	/* 2.4Ghz; 1-13: 11b/g channels. */
	if (!data->sku_cap_band_24GHz_enable)
		goto band_5;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	for (ch_idx = 0;
	    ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
	    ch_idx++) {

		uint32_t nflags = 0;
		int cflags = 0;

		/* Channel profile width differs between response versions. */
		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		} else {
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
		}
		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}

band_5:
	/* 5Ghz */
	if (!data->sku_cap_band_52GHz_enable)
		goto band_6;


	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11A);
	setbit(bands, IEEE80211_MODE_11NA);
	setbit(bands, IEEE80211_MODE_VHT_5GHZ);

	for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
	    ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
	    ch_idx++) {
		uint32_t nflags = 0;
		int cflags = 0;

		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		else
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);

		if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
			continue;

		/* Wider channel widths allowed on 5GHz. */
		if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_HT40;
		if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT80;
		if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
			cflags |= NET80211_CBW_FLAG_VHT160;

		/* XXX-BZ nflags RADAR/DFS/INDOOR */

		/* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
		    nvm_channels[ch_idx],
		    ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
		    /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
		    nflags, bands, cflags);
	}
band_6:
	/* 6GHz one day ... */
	return;
}
3279
3280 static int
iwx_mimo_enabled(struct iwx_softc * sc)3281 iwx_mimo_enabled(struct iwx_softc *sc)
3282 {
3283
3284 return !sc->sc_nvm.sku_cap_mimo_disable;
3285 }
3286
3287 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3289 uint16_t ssn, uint16_t buf_size)
3290 {
3291 reorder_buf->head_sn = ssn;
3292 reorder_buf->num_stored = 0;
3293 reorder_buf->buf_size = buf_size;
3294 reorder_buf->last_amsdu = 0;
3295 reorder_buf->last_sub_index = 0;
3296 reorder_buf->removed = 0;
3297 reorder_buf->valid = 0;
3298 reorder_buf->consec_oldsn_drops = 0;
3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3300 reorder_buf->consec_oldsn_prev_drop = 0;
3301 }
3302
3303 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3305 {
3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3307
3308 reorder_buf->removed = 1;
3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3310 }
3311
3312 #define IWX_MAX_RX_BA_SESSIONS 16
3313
3314 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3316 {
3317 int i;
3318
3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3320 if (sc->sc_rxba_data[i].baid ==
3321 IWX_RX_REORDER_DATA_INVALID_BAID)
3322 continue;
3323 if (sc->sc_rxba_data[i].tid == tid)
3324 return &sc->sc_rxba_data[i];
3325 }
3326
3327 return NULL;
3328 }
3329
static int
/*
 * Configure a Rx BA (block-ack) session in firmware via the BAID
 * allocation command.  On start the firmware allocates a BAID which is
 * returned through *baid; on stop the existing BAID (looked up by TID)
 * is removed and reported back through *baid so the caller can clear
 * the corresponding reorder buffer.  Returns 0 or an errno value.
 */
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		/* Allocate a new BAID for this station/TID window. */
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* Command v1 removes by BAID; later versions by sta/TID. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	/* Firmware returns the allocated BAID in the command status. */
	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* BAID must index into sc_rxba_data[]. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3380
static void
/*
 * Start or stop an Rx block-ack (A-MPDU reorder) session for a TID and
 * maintain the corresponding sc_rxba_data[] reorder-buffer slot plus the
 * active-session count.  Deaggregation itself is done in hardware.
 */
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	/* Refuse to start more sessions than the hardware supports. */
	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		/* Older firmware (ADD_STA-based BA setup) is not supported. */
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		if (timeout_val != 0) {
			/*
			 * NOTE(review): this early return also skips the
			 * sc_rx_ba_sessions++ below — looks like a leftover
			 * from the removed session timer; verify intent.
			 */
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	/* Keep the active-session count in sync. */
	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3431
3432 /**
3433 * @brief Allocate an A-MPDU / aggregation session for the given node and TID.
3434 *
3435 * This allocates a TX queue specifically for that TID.
3436 *
3437 * Note that this routine currently doesn't return any status/errors,
3438 * so the caller can't know if the aggregation session was setup or not.
3439 */
static void
/*
 * Enable a Tx queue dedicated to the given TID for A-MPDU transmission.
 * If no queue was previously assigned to the TID, the next unused queue
 * index is derived from the enabled-queue bitmask (fls() of the mask is
 * one past the highest enabled queue).  Firmware handles the actual
 * BA session negotiation; errors here are logged but not reported.
 */
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid)
{
	int err, qid;

	qid = sc->aggqid[tid];
	if (qid == 0) {
		/* Firmware should pick the next unused Tx queue. */
		qid = fls(sc->qenablemsk);
	}

	DPRINTF(("%s: qid=%i\n", __func__, qid));

	/*
	 * Simply enable the queue.
	 * Firmware handles Tx Ba session setup and teardown.
	 */
	if ((sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwx_nic_lock(sc)) {
			return;
		}
		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
		    IWX_TX_RING_COUNT);
		iwx_nic_unlock(sc);
		if (err) {
			printf("%s: could not enable Tx queue %d "
			    "(error %d)\n", DEVNAME(sc), qid, err);
			return;
		}
	}
	/* Record the TID→queue mapping and mark the aggregation running. */
	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
	sc->aggqid[tid] = qid;
}
3475
static void
/*
 * Deferred task that services pending Rx BA session start/stop requests
 * queued in sc->ba_rx.{start,stop}_tidmask by the interrupt path, since
 * the firmware commands involved cannot be sent from that context.
 */
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			if (ba->ba_flags == IWX_BA_DONE) {
				/*
				 * NOTE(review): this break exits the loop and
				 * leaves later TIDs unserviced this pass;
				 * `continue` may have been intended — verify.
				 */
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3512
3513 /**
3514 * @brief Task called to setup a deferred block-ack session.
3515 *
3516 * This sets up any/all pending blockack sessions as defined
3517 * in sc->ba_tx.start_tidmask.
3518 *
3519 * Note: the call to iwx_sta_tx_agg_start() isn't being error checked.
3520 */
static void
/*
 * Deferred task that services pending Tx BA session start requests queued
 * in sc->ba_tx.start_tidmask.  Sessions are set up under the driver lock;
 * net80211 is informed afterwards, outside the lock, to avoid recursion.
 */
iwx_ba_tx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	uint32_t started_mask = 0;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		const struct ieee80211_tx_ampdu *tap;

		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		tap = &ni->ni_tx_ampdu[tid];
		/*
		 * NOTE(review): breaking here stops servicing ALL later TIDs
		 * when one session is already running; `continue` may have
		 * been intended — verify against the Rx task's behavior.
		 */
		if (IEEE80211_AMPDU_RUNNING(tap))
			break;
		if (sc->ba_tx.start_tidmask & (1 << tid)) {
			IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
			    "%s: ampdu tx start for tid %i\n", __func__, tid);
			iwx_sta_tx_agg_start(sc, ni, tid);
			sc->ba_tx.start_tidmask &= ~(1 << tid);
			started_mask |= (1 << tid);
		}
	}

	IWX_UNLOCK(sc);

	/* Iterate over the sessions we started; mark them as active */
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (started_mask & (1 << tid)) {
			IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
			    "%s: informing net80211 to start ampdu on tid %i\n",
			    __func__, tid);
			ieee80211_ampdu_tx_request_active_ext(ni, tid, 1);
		}
	}
}
3561
static void
/*
 * Read the device MAC address from the CSR address registers into
 * data->hw_addr.  The OEM-fused "strap" registers take precedence over
 * the OTP copy when they hold a valid address.  Leaves hw_addr zeroed
 * if the NIC cannot be locked for register access.
 */
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	uint32_t mac_addr0, mac_addr1;

	memset(data->hw_addr, 0, sizeof(data->hw_addr));

	if (!iwx_nic_lock(sc))
		return;

	/* OEM-fused ("strap") address registers. */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	/* If OEM fused a valid address, use it instead of the one in OTP. */
	if (iwx_is_valid_mac_addr(data->hw_addr)) {
		iwx_nic_unlock(sc);
		return;
	}

	/* Fall back to the OTP copy. */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	iwx_nic_unlock(sc);
}
3590
3591 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3592 iwx_is_valid_mac_addr(const uint8_t *addr)
3593 {
3594 static const uint8_t reserved_mac[] = {
3595 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3596 };
3597
3598 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3599 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3600 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3601 !ETHER_IS_MULTICAST(addr));
3602 }
3603
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	uint8_t b[4];

	/*
	 * The two CSR words carry the MAC address with the bytes of each
	 * word reversed relative to wire order.  Byte order within each
	 * word is the host in-memory representation, exactly as with the
	 * original pointer-cast implementation (memcpy avoids the cast).
	 */
	memcpy(b, &mac_addr0, sizeof(b));
	dest[0] = b[3];
	dest[1] = b[2];
	dest[2] = b[1];
	dest[3] = b[0];

	memcpy(b, &mac_addr1, sizeof(b));
	dest[4] = b[1];
	dest[5] = b[0];
}
3619
static int
/*
 * Fetch NVM (non-volatile memory) contents from the firmware via the
 * NVM_GET_INFO command and populate sc->sc_nvm: MAC address (read from
 * CSR registers, not the response), SKU capability flags, antenna masks,
 * and LAR support.  The raw response is also stashed in sc->sc_rsp_info
 * for later regulatory/channel parsing.  Returns 0 or an errno value.
 */
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err = 0;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory. So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err) {
		printf("%s: failed to send cmd (error %d)", __func__, err);
		return err;
	}

	/* The payload must match exactly the expected response version. */
	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
		printf("%s: resp_len=%zu\n", __func__, resp_len);
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* The MAC address comes from CSR registers, not the NVM response. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* Location-aware regulatory needs both NVM and firmware support. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* Stash the raw response for later channel-map parsing. */
	memcpy(&sc->sc_rsp_info, rsp, resp_len);
	if (v4) {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
	} else {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3709
static int
/*
 * Upload the regular firmware image via the context-info mechanism
 * (gen3 for AX210 and later) and sleep until the firmware signals it is
 * alive or one second elapses.  Returns 0 on success, EINVAL if the
 * firmware never reported OK, or the msleep()/init error otherwise.
 */
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The image copy and IML buffer are no longer needed after upload. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3746
static int
/*
 * Prepare the NIC for firmware load (clear pending interrupts and rfkill
 * handshake bits, run basic NIC init), enable the firmware-load interrupt,
 * then kick off the actual upload.  Returns 0 or an errno value.
 */
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Acknowledge any stale interrupts before reconfiguring. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3774
3775 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3776 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3777 size_t len)
3778 {
3779 const struct iwx_ucode_tlv *tlv;
3780 uint32_t sha1 = 0;
3781 uint16_t mac_type = 0, rf_id = 0;
3782 uint8_t *pnvm_data = NULL, *tmp;
3783 int hw_match = 0;
3784 uint32_t size = 0;
3785 int err;
3786
3787 while (len >= sizeof(*tlv)) {
3788 uint32_t tlv_len, tlv_type;
3789
3790 len -= sizeof(*tlv);
3791 tlv = (const void *)data;
3792
3793 tlv_len = le32toh(tlv->length);
3794 tlv_type = le32toh(tlv->type);
3795
3796 if (len < tlv_len) {
3797 printf("%s: invalid TLV len: %zd/%u\n",
3798 DEVNAME(sc), len, tlv_len);
3799 err = EINVAL;
3800 goto out;
3801 }
3802
3803 data += sizeof(*tlv);
3804
3805 switch (tlv_type) {
3806 case IWX_UCODE_TLV_PNVM_VERSION:
3807 if (tlv_len < sizeof(uint32_t))
3808 break;
3809
3810 sha1 = le32_to_cpup((const uint32_t *)data);
3811 break;
3812 case IWX_UCODE_TLV_HW_TYPE:
3813 if (tlv_len < 2 * sizeof(uint16_t))
3814 break;
3815
3816 if (hw_match)
3817 break;
3818
3819 mac_type = le16_to_cpup((const uint16_t *)data);
3820 rf_id = le16_to_cpup((const uint16_t *)(data +
3821 sizeof(uint16_t)));
3822
3823 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3824 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3825 hw_match = 1;
3826 break;
3827 case IWX_UCODE_TLV_SEC_RT: {
3828 const struct iwx_pnvm_section *section;
3829 uint32_t data_len;
3830
3831 section = (const void *)data;
3832 data_len = tlv_len - sizeof(*section);
3833
3834 /* TODO: remove, this is a deprecated separator */
3835 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3836 break;
3837
3838 tmp = malloc(size + data_len, M_DEVBUF,
3839 M_WAITOK | M_ZERO);
3840 if (tmp == NULL) {
3841 err = ENOMEM;
3842 goto out;
3843 }
3844 // XXX:misha pnvm_data is NULL and size is 0 at first pass
3845 memcpy(tmp, pnvm_data, size);
3846 memcpy(tmp + size, section->data, data_len);
3847 free(pnvm_data, M_DEVBUF);
3848 pnvm_data = tmp;
3849 size += data_len;
3850 break;
3851 }
3852 case IWX_UCODE_TLV_PNVM_SKU:
3853 /* New PNVM section started, stop parsing. */
3854 goto done;
3855 default:
3856 break;
3857 }
3858
3859 if (roundup(tlv_len, 4) > len)
3860 break;
3861 len -= roundup(tlv_len, 4);
3862 data += roundup(tlv_len, 4);
3863 }
3864 done:
3865 if (!hw_match || size == 0) {
3866 err = ENOENT;
3867 goto out;
3868 }
3869
3870 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3871 if (err) {
3872 printf("%s: could not allocate DMA memory for PNVM\n",
3873 DEVNAME(sc));
3874 err = ENOMEM;
3875 goto out;
3876 }
3877 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3878 iwx_ctxt_info_gen3_set_pnvm(sc);
3879 sc->sc_pnvm_ver = sha1;
3880 out:
3881 free(pnvm_data, M_DEVBUF);
3882 return err;
3883 }
3884
3885 static int
iwx_pnvm_parse(struct iwx_softc * sc,const uint8_t * data,size_t len)3886 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
3887 {
3888 const struct iwx_ucode_tlv *tlv;
3889
3890 while (len >= sizeof(*tlv)) {
3891 uint32_t tlv_len, tlv_type;
3892
3893 len -= sizeof(*tlv);
3894 tlv = (const void *)data;
3895
3896 tlv_len = le32toh(tlv->length);
3897 tlv_type = le32toh(tlv->type);
3898
3899 if (len < tlv_len || roundup(tlv_len, 4) > len)
3900 return EINVAL;
3901
3902 if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
3903 const struct iwx_sku_id *sku_id =
3904 (const void *)(data + sizeof(*tlv));
3905
3906 data += sizeof(*tlv) + roundup(tlv_len, 4);
3907 len -= roundup(tlv_len, 4);
3908
3909 if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
3910 sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
3911 sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
3912 iwx_pnvm_handle_section(sc, data, len) == 0)
3913 return 0;
3914 } else {
3915 data += sizeof(*tlv) + roundup(tlv_len, 4);
3916 len -= roundup(tlv_len, 4);
3917 }
3918 }
3919
3920 return ENOENT;
3921 }
3922
3923 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3924 static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc * sc)3925 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3926 {
3927 struct iwx_prph_scratch *prph_scratch;
3928 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3929
3930 prph_scratch = sc->prph_scratch_dma.vaddr;
3931 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3932
3933 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3934 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3935
3936 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3937 }
3938
3939 /*
3940 * Load platform-NVM (non-volatile-memory) data from the filesystem.
3941 * This data apparently contains regulatory information and affects device
3942 * channel configuration.
3943 * The SKU of AX210 devices tells us which PNVM file section is needed.
3944 * Pre-AX210 devices store NVM data onboard.
3945 */
3946 static int
iwx_load_pnvm(struct iwx_softc * sc)3947 iwx_load_pnvm(struct iwx_softc *sc)
3948 {
3949 const int wait_flags = IWX_PNVM_COMPLETE;
3950 int err = 0;
3951 const struct firmware *pnvm;
3952
3953 if (sc->sc_sku_id[0] == 0 &&
3954 sc->sc_sku_id[1] == 0 &&
3955 sc->sc_sku_id[2] == 0)
3956 return 0;
3957
3958 if (sc->sc_pnvm_name) {
3959 if (sc->pnvm_dma.vaddr == NULL) {
3960 IWX_UNLOCK(sc);
3961 pnvm = firmware_get(sc->sc_pnvm_name);
3962 if (pnvm == NULL) {
3963 printf("%s: could not read %s (error %d)\n",
3964 DEVNAME(sc), sc->sc_pnvm_name, err);
3965 IWX_LOCK(sc);
3966 return EINVAL;
3967 }
3968 sc->sc_pnvm = pnvm;
3969
3970 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3971 IWX_LOCK(sc);
3972 if (err && err != ENOENT) {
3973 return EINVAL;
3974 }
3975 } else
3976 iwx_ctxt_info_gen3_set_pnvm(sc);
3977 }
3978
3979 if (!iwx_nic_lock(sc)) {
3980 return EBUSY;
3981 }
3982
3983 /*
3984 * If we don't have a platform NVM file simply ask firmware
3985 * to proceed without it.
3986 */
3987
3988 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3989 IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3990
3991 /* Wait for the pnvm complete notification from firmware. */
3992 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3993 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3994 if (err)
3995 break;
3996 }
3997
3998 iwx_nic_unlock(sc);
3999
4000 return err;
4001 }
4002
4003 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)4004 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4005 {
4006 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4007 .valid = htole32(valid_tx_ant),
4008 };
4009
4010 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4011 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4012 }
4013
4014 static int
iwx_send_phy_cfg_cmd(struct iwx_softc * sc)4015 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4016 {
4017 struct iwx_phy_cfg_cmd phy_cfg_cmd;
4018
4019 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4020 phy_cfg_cmd.calib_control.event_trigger =
4021 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4022 phy_cfg_cmd.calib_control.flow_trigger =
4023 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4024
4025 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4026 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4027 }
4028
4029 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)4030 iwx_send_dqa_cmd(struct iwx_softc *sc)
4031 {
4032 struct iwx_dqa_enable_cmd dqa_cmd = {
4033 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4034 };
4035 uint32_t cmd_id;
4036
4037 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4038 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4039 }
4040
4041 static int
iwx_load_ucode_wait_alive(struct iwx_softc * sc)4042 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4043 {
4044 int err;
4045
4046 IWX_UNLOCK(sc);
4047 err = iwx_read_firmware(sc);
4048 IWX_LOCK(sc);
4049 if (err)
4050 return err;
4051
4052 err = iwx_start_fw(sc);
4053 if (err)
4054 return err;
4055
4056 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4057 err = iwx_load_pnvm(sc);
4058 if (err)
4059 return err;
4060 }
4061
4062 iwx_post_alive(sc);
4063
4064 return 0;
4065 }
4066
/*
 * Boot the firmware and run its init sequence: load the image, send the
 * extended config and NVM-access-complete commands, then wait for the
 * init-complete notification.  If 'readnvm' is set, also read NVM data
 * and copy the hardware MAC address into the ic.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* With rfkill asserted, only proceed when NVM still must be read. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		/* 2-second timeout per wakeup; the softc lock is dropped. */
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4139
4140 static int
iwx_config_ltr(struct iwx_softc * sc)4141 iwx_config_ltr(struct iwx_softc *sc)
4142 {
4143 struct iwx_ltr_config_cmd cmd = {
4144 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4145 };
4146
4147 if (!sc->sc_ltr_enabled)
4148 return 0;
4149
4150 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4151 }
4152
4153 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4154 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4155 bus_dma_segment_t *seg)
4156 {
4157 struct iwx_rx_data *data = &ring->data[idx];
4158
4159 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4160 struct iwx_rx_transfer_desc *desc = ring->desc;
4161 desc[idx].rbid = htole16(idx & 0xffff);
4162 desc[idx].addr = htole64((*seg).ds_addr);
4163 bus_dmamap_sync(ring->data_dmat, data->map,
4164 BUS_DMASYNC_PREWRITE);
4165 } else {
4166 ((uint64_t *)ring->desc)[idx] =
4167 htole64((*seg).ds_addr);
4168 bus_dmamap_sync(ring->data_dmat, data->map,
4169 BUS_DMASYNC_PREWRITE);
4170 }
4171 }
4172
/*
 * Attach a freshly allocated mbuf cluster to RX ring slot 'idx' and
 * program its DMA address into the matching RX descriptor.
 * Returns 0 on success or an errno.
 * NOTE(review): the 'size' parameter is unused; IWX_RBUF_SIZE is always
 * allocated -- confirm whether callers rely on 'size'.
 */
static int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;
	bus_dma_segment_t seg;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	if (data->m != NULL) {
		/*
		 * Replacing a still-mapped buffer: once the old mapping is
		 * torn down the slot must be refilled, so a load failure
		 * below is unrecoverable (hence the panic).
		 */
		bus_dmamap_unload(ring->data_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx, &seg);
	return 0;
}
4210
4211 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4212 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4213 struct iwx_rx_mpdu_desc *desc)
4214 {
4215 int energy_a, energy_b;
4216
4217 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4218 energy_a = desc->v3.energy_a;
4219 energy_b = desc->v3.energy_b;
4220 } else {
4221 energy_a = desc->v1.energy_a;
4222 energy_b = desc->v1.energy_b;
4223 }
4224 energy_a = energy_a ? -energy_a : -256;
4225 energy_b = energy_b ? -energy_b : -256;
4226 return MAX(energy_a, energy_b);
4227 }
4228
4229 static int
iwx_rxmq_get_chains(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4230 iwx_rxmq_get_chains(struct iwx_softc *sc,
4231 struct iwx_rx_mpdu_desc *desc)
4232 {
4233
4234 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4235 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4236 IWX_RATE_MCS_ANT_POS);
4237 else
4238 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4239 IWX_RATE_MCS_ANT_POS);
4240 }
4241
/*
 * Cache the PHY info reported by the firmware; later RX frame handling
 * reads it from sc_last_phy_info.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	/*
	 * NOTE(review): syncing the TX ring's data tag with PREREAD before
	 * copying out of an RX packet looks odd; POSTREAD on the RX buffer
	 * would be the expected pattern -- confirm against the OpenBSD
	 * original.
	 */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4254
4255 /*
4256 * Retrieve the average noise (in dBm) among receivers.
4257 */
4258 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4259 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4260 {
4261 int i, total, nbant, noise;
4262
4263 total = nbant = noise = 0;
4264 for (i = 0; i < 3; i++) {
4265 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4266 if (noise) {
4267 total += noise;
4268 nbant++;
4269 }
4270 }
4271
4272 /* There should be at least one antenna but check anyway. */
4273 return (nbant == 0) ? -127 : (total / nbant) - 107;
4274 }
4275
/*
 * OpenBSD CCMP decapsulation/replay-check helper, compiled out via
 * "#if 0" -- apparently retained for reference while hardware
 * decryption support is ported (see the "#if 0 XXX hw decrypt" call
 * site in iwx_rx_frame()).
 */
#if 0
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is be set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0] |
	    (uint64_t)ivp[1] << 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4335
4336 static int
iwx_rx_hwdecrypt(struct iwx_softc * sc,struct mbuf * m,uint32_t rx_pkt_status)4337 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4338 {
4339 struct ieee80211_frame *wh;
4340 int ret = 0;
4341 uint8_t type, subtype;
4342
4343 wh = mtod(m, struct ieee80211_frame *);
4344
4345 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4346 if (type == IEEE80211_FC0_TYPE_CTL) {
4347 return 0;
4348 }
4349
4350 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4351 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4352 return 0;
4353 }
4354
4355
4356 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4357 IEEE80211_FC0_TYPE_CTL)
4358 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4359 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4360 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4361 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4362 ret = 1;
4363 goto out;
4364 }
4365 /* Check whether decryption was successful or not. */
4366 if ((rx_pkt_status &
4367 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4368 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4369 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4370 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4371 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4372 ret = 1;
4373 goto out;
4374 }
4375 }
4376 out:
4377 return ret;
4378 }
4379
/*
 * Deliver a received frame to net80211, optionally filling in radiotap
 * data when a sniffer is attached.  Consumes the mbuf.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Map the IEEE channel number onto an index into ic_channels. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0 /* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* rate_n_flags layout changed in v2 of the firmware API. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			tap->wr_rate = (0x80 | mcs);
		} else {
			switch (rate) {
			/* CCK rates. */
			case 10: tap->wr_rate = 2; break;
			case 20: tap->wr_rate = 4; break;
			case 55: tap->wr_rate = 11; break;
			case 110: tap->wr_rate = 22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate = 12; break;
			case 0xf: tap->wr_rate = 18; break;
			case 0x5: tap->wr_rate = 24; break;
			case 0x7: tap->wr_rate = 36; break;
			case 0x9: tap->wr_rate = 48; break;
			case 0xb: tap->wr_rate = 72; break;
			case 0x1: tap->wr_rate = 96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default: tap->wr_rate = 0;
			}
			/*
			 * NOTE(review): the assignment below overwrites the
			 * switch result above, making the switch dead code;
			 * acknowledged by the existing hack comment.
			 */
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may sleep/recurse; call it without the softc lock. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4515
4516 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4517 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4518 size_t maxlen)
4519 {
4520 struct ieee80211com *ic = &sc->sc_ic;
4521 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4522 struct ieee80211_node *ni = vap->iv_bss;
4523 struct ieee80211_key *k;
4524 struct ieee80211_rx_stats rxs;
4525 struct iwx_rx_mpdu_desc *desc;
4526 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4527 int rssi;
4528 uint8_t chanidx;
4529 uint16_t phy_info;
4530 size_t desc_size;
4531 int pad = 0;
4532
4533 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4534 desc_size = sizeof(*desc);
4535 else
4536 desc_size = IWX_RX_DESC_SIZE_V1;
4537
4538 if (maxlen < desc_size) {
4539 m_freem(m);
4540 return; /* drop */
4541 }
4542
4543 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4544
4545 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4546 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4547 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4548 m_freem(m);
4549 return; /* drop */
4550 }
4551
4552 len = le16toh(desc->mpdu_len);
4553 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4554 /* Allow control frames in monitor mode. */
4555 if (len < sizeof(struct ieee80211_frame_cts)) {
4556 m_freem(m);
4557 return;
4558 }
4559
4560 } else if (len < sizeof(struct ieee80211_frame)) {
4561 m_freem(m);
4562 return;
4563 }
4564 if (len > maxlen - desc_size) {
4565 m_freem(m);
4566 return;
4567 }
4568
4569 // TODO: arithmetic on a pointer to void is a GNU extension
4570 m->m_data = (char *)pktdata + desc_size;
4571 m->m_pkthdr.len = m->m_len = len;
4572
4573 /* Account for padding following the frame header. */
4574 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4575 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4576 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4577 if (type == IEEE80211_FC0_TYPE_CTL) {
4578 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4579 case IEEE80211_FC0_SUBTYPE_CTS:
4580 hdrlen = sizeof(struct ieee80211_frame_cts);
4581 break;
4582 case IEEE80211_FC0_SUBTYPE_ACK:
4583 hdrlen = sizeof(struct ieee80211_frame_ack);
4584 break;
4585 default:
4586 hdrlen = sizeof(struct ieee80211_frame_min);
4587 break;
4588 }
4589 } else
4590 hdrlen = ieee80211_hdrsize(wh);
4591
4592 if ((le16toh(desc->status) &
4593 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4594 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4595 // CCMP header length
4596 hdrlen += 8;
4597 }
4598
4599 memmove(m->m_data + 2, m->m_data, hdrlen);
4600 m_adj(m, 2);
4601
4602 }
4603
4604 if ((le16toh(desc->status) &
4605 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4606 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4607 pad = 1;
4608 }
4609
4610 // /*
4611 // * Hardware de-aggregates A-MSDUs and copies the same MAC header
4612 // * in place for each subframe. But it leaves the 'A-MSDU present'
4613 // * bit set in the frame header. We need to clear this bit ourselves.
4614 // * (XXX This workaround is not required on AX200/AX201 devices that
4615 // * have been tested by me, but it's unclear when this problem was
4616 // * fixed in the hardware. It definitely affects the 9k generation.
4617 // * Leaving this in place for now since some 9k/AX200 hybrids seem
4618 // * to exist that we may eventually add support for.)
4619 // *
4620 // * And we must allow the same CCMP PN for subframes following the
4621 // * first subframe. Otherwise they would be discarded as replays.
4622 // */
4623 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4624 DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
4625 // struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4626 // uint8_t subframe_idx = (desc->amsdu_info &
4627 // IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4628 // if (subframe_idx > 0)
4629 // rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4630 // if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4631 // m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4632 // struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4633 // struct ieee80211_qosframe_addr4 *);
4634 // qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4635 // } else if (ieee80211_has_qos(wh) &&
4636 // m->m_len >= sizeof(struct ieee80211_qosframe)) {
4637 // struct ieee80211_qosframe *qwh = mtod(m,
4638 // struct ieee80211_qosframe *);
4639 // qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4640 // }
4641 }
4642
4643 /*
4644 * Verify decryption before duplicate detection. The latter uses
4645 * the TID supplied in QoS frame headers and this TID is implicitly
4646 * verified as part of the CCMP nonce.
4647 */
4648 k = ieee80211_crypto_get_txkey(ni, m);
4649 if (k != NULL &&
4650 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4651 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4652 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4653 m_freem(m);
4654 return;
4655 }
4656
4657 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4658 rate_n_flags = le32toh(desc->v3.rate_n_flags);
4659 chanidx = desc->v3.channel;
4660 device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4661 } else {
4662 rate_n_flags = le32toh(desc->v1.rate_n_flags);
4663 chanidx = desc->v1.channel;
4664 device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4665 }
4666
4667 phy_info = le16toh(desc->phy_info);
4668
4669 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4670 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4671 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
4672
4673 memset(&rxs, 0, sizeof(rxs));
4674 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4675 rxs.r_flags |= IEEE80211_R_BAND;
4676 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4677 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4678
4679 rxs.c_ieee = chanidx;
4680 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4681 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4682 rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4683 rxs.c_rx_tsf = device_timestamp;
4684 rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
4685 if (rxs.c_chain != 0)
4686 rxs.r_flags |= IEEE80211_R_C_CHAIN;
4687
4688 /* rssi is in 1/2db units */
4689 rxs.c_rssi = rssi * 2;
4690 rxs.c_nf = sc->sc_noise;
4691
4692 if (pad) {
4693 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4694 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4695 }
4696
4697 if (ieee80211_add_rx_params(m, &rxs) == 0) {
4698 printf("%s: ieee80211_add_rx_params failed\n", __func__);
4699 return;
4700 }
4701
4702 ieee80211_add_rx_params(m, &rxs);
4703
4704 #if 0
4705 if (iwx_rx_reorder(sc, m, chanidx, desc,
4706 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4707 rate_n_flags, device_timestamp, &rxi, ml))
4708 return;
4709 #endif
4710
4711 if (pad) {
4712 #define TRIM 8
4713 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4714 hdrlen = ieee80211_hdrsize(wh);
4715 memmove(m->m_data + TRIM, m->m_data, hdrlen);
4716 m_adj(m, TRIM);
4717 #undef TRIM
4718 }
4719
4720 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4721 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4722 rate_n_flags, device_timestamp, rssi);
4723 }
4724
4725 static void
iwx_clear_tx_desc(struct iwx_softc * sc,struct iwx_tx_ring * ring,int idx)4726 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4727 {
4728 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4729 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4730 int i;
4731
4732 /* First TB is never cleared - it is bidirectional DMA data. */
4733 for (i = 1; i < num_tbs; i++) {
4734 struct iwx_tfh_tb *tb = &desc->tbs[i];
4735 memset(tb, 0, sizeof(*tb));
4736 }
4737 desc->num_tbs = htole16(1);
4738
4739 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4740 BUS_DMASYNC_PREWRITE);
4741 }
4742
/*
 * Finish a transmitted frame: tear down its DMA mapping and hand the
 * mbuf back to net80211 (status 0 here; failures are accounted for in
 * iwx_rx_tx_cmd()).
 */
static void
iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
    struct iwx_tx_data *txd)
{
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
	/* Mark the slot free; iwx_txq_advance() checks txd->m. */
	txd->m = NULL;
	txd->in = NULL;
}
4754
/*
 * Reclaim completed TX descriptors up to (not including) hardware
 * index 'idx'.  'tail' wraps at the software ring size while 'tail_hw'
 * wraps at the (possibly larger) hardware TFD queue size, so the two
 * counters advance in lock-step but with different moduli.
 */
static void
iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
{
	struct iwx_tx_data *txd;

	while (ring->tail_hw != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL) {
			iwx_clear_tx_desc(sc, ring, ring->tail);
			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
			iwx_txd_done(sc, ring, txd);
			ring->queued--;
			if (ring->queued < 0)
				panic("caught negative queue count");
		}
		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
	}
}
4774
/*
 * Handle a firmware TX response: update interface counters and reclaim
 * TX ring entries up to the reported SSN.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	/* Only aggregation queues may report multiple frames. */
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Per-frame status array plus trailing SSN must fit in the packet. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* A response on this queue means it is making progress. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 * The SSN sits unaligned after the status array, hence memcpy.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4836
4837 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4838 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4839 {
4840 IWX_ASSERT_LOCKED(sc);
4841
4842 if (ring->queued < iwx_lomark) {
4843 sc->qfullmsk &= ~(1 << ring->qid);
4844 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4845 /*
4846 * Well, we're in interrupt context, but then again
4847 * I guess net80211 does all sorts of stunts in
4848 * interrupt context, so maybe this is no biggie.
4849 */
4850 iwx_start(sc);
4851 }
4852 }
4853 }
4854
4855 static void
iwx_rx_compressed_ba(struct iwx_softc * sc,struct iwx_rx_packet * pkt)4856 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4857 {
4858 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4859 struct ieee80211com *ic = &sc->sc_ic;
4860 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4861 struct iwx_node *in = IWX_NODE(vap->iv_bss);
4862 struct ieee80211_node *ni = &in->in_ni;
4863 struct iwx_tx_ring *ring;
4864 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4865 int qid;
4866
4867 // if (ic->ic_state != IEEE80211_S_RUN)
4868 // return;
4869
4870 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4871 return;
4872
4873 if (ba_res->sta_id != IWX_STATION_ID)
4874 return;
4875
4876 in = (void *)ni;
4877
4878 tfd_cnt = le16toh(ba_res->tfd_cnt);
4879 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4880 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4881 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4882 sizeof(ba_res->tfd[0]) * tfd_cnt))
4883 return;
4884
4885 for (i = 0; i < tfd_cnt; i++) {
4886 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4887 uint8_t tid;
4888
4889 tid = ba_tfd->tid;
4890 if (tid >= nitems(sc->aggqid))
4891 continue;
4892
4893 qid = sc->aggqid[tid];
4894 if (qid != htole16(ba_tfd->q_num))
4895 continue;
4896
4897 ring = &sc->txq[qid];
4898
4899 #if 0
4900 ba = &ni->ni_tx_ba[tid];
4901 if (ba->ba_state != IEEE80211_BA_AGREED)
4902 continue;
4903 #endif
4904 idx = le16toh(ba_tfd->tfd_index);
4905 sc->sc_tx_timer[qid] = 0;
4906 iwx_txq_advance(sc, ring, idx);
4907 iwx_clear_oactive(sc, ring);
4908 }
4909 }
4910
static void
iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	/*
	 * Handle a missed-beacons notification from firmware.  If the
	 * number of consecutive missed beacons since the last received
	 * frame exceeds the VAP's bmiss threshold, tell net80211 so it
	 * can initiate its beacon-miss recovery (probe/roam/scan).
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
	uint32_t missed;

	/* Only relevant for an associated station. */
	if ((ic->ic_opmode != IEEE80211_M_STA) ||
	    (vap->iv_state != IEEE80211_S_RUN))
		return;

	bus_dmamap_sync(sc->rxq.data_dmat, data->map,
	    BUS_DMASYNC_POSTREAD);

	IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
	    "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n",
	    __func__,
	    le32toh(mbn->mac_id),
	    le32toh(mbn->consec_missed_beacons_since_last_rx),
	    le32toh(mbn->consec_missed_beacons),
	    le32toh(mbn->num_expected_beacons),
	    le32toh(mbn->num_recvd_beacons));

	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
	if (missed > vap->iv_bmissthreshold) {
		ieee80211_beacon_miss(ic);
	}
}
4941
static int
iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
{
	/*
	 * Add or remove the firmware binding between our MAC context and
	 * the VAP's PHY context.  'action' is one of the
	 * IWX_FW_CTXT_ACTION_* values.  Returns 0 on success, EINVAL if
	 * no PHY context is set, or EIO if firmware rejected the command.
	 */
	struct iwx_binding_cmd cmd;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
	uint32_t status;

	/* Driver state must agree with the requested transition. */
	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("binding already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("binding already removed");

	if (phyctxt == NULL) /* XXX race with iwx_stop() */
		return EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only the first MAC slot is used; mark the rest invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);

	/* Without CDB support everything is driven by the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	status = 0;
	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
	    &cmd, &status);
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
4987
4988 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4989 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4990 {
4991 int ctlchan = ieee80211_chan2ieee(ic, chan);
4992 int midpoint = chan->ic_vht_ch_freq1;
4993
4994 /*
4995 * The FW is expected to check the control channel position only
4996 * when in HT/VHT and the channel width is not 20MHz. Return
4997 * this value as the default one:
4998 */
4999 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5000
5001 switch (ctlchan - midpoint) {
5002 case -6:
5003 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5004 break;
5005 case -2:
5006 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5007 break;
5008 case 2:
5009 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5010 break;
5011 case 6:
5012 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5013 break;
5014 default:
5015 break;
5016 }
5017
5018 return pos;
5019 }
5020
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	/*
	 * Send a v3/v4 PHY context command using the "ultra high band"
	 * (larger fw_channel_info) command layout.  Channel band/width/
	 * control position are derived from the context's channel flags;
	 * the 'sco' and 'vht_chan_width' parameters are not used by this
	 * variant (they are kept for signature parity with the legacy
	 * non-UHB version).
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* Without CDB support everything is driven by the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Widest supported mode wins: 80MHz, then 40MHz, then 20MHz. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Older command versions carry Rx chain configuration here; with
	 * cmdver >= 4 or RLC_CONFIG_CMD v2 the firmware handles this via
	 * a separate RLC command.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5074
#if 0
/*
 * Legacy (non-UHB) variant of the v3/v4 PHY context command, using the
 * smaller fw_channel_info layout.  Currently compiled out: all supported
 * hardware takes the UHB path (iwx_phy_ctxt_cmd_uhb_v3_v4()).  Unlike
 * the UHB variant, this one derives width/control position from the
 * 'sco' and 'vht_chan_width' parameters.
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5136
static int
iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
{
	/*
	 * Dispatch a PHY context command to firmware, selecting the
	 * command layout based on firmware capabilities.  Only command
	 * versions 3 and 4 are supported; returns ENOTSUP otherwise.
	 * Note: 'apply_time' is accepted for interface compatibility but
	 * not used by the v3/v4 command paths.
	 */
	int cmdver;

	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
	if (cmdver != 3 && cmdver != 4) {
		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
		    DEVNAME(sc));
		return ENOTSUP;
	}

	/*
	 * Intel increased the size of the fw_channel_info struct and neglected
	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
	 * member in the middle.
	 * To keep things simple we use a separate function to handle the larger
	 * variant of the phy context command.
	 */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
		    chains_dynamic, action, sco, vht_chan_width, cmdver);
	} else
		panic("Unsupported old hardware contact thj@");

#if 0
	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
	    action, sco, vht_chan_width, cmdver);
#endif
}
5169
static int
iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	/*
	 * Queue a host command on the dedicated command ring and, unless
	 * IWX_CMD_ASYNC is set, sleep until firmware acknowledges it.
	 * For IWX_CMD_WANT_RESP commands the response packet is returned
	 * in hcmd->resp_pkt and must be released with iwx_free_resp().
	 * Returns 0 on success or an errno value.
	 */
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tfh_tfd *desc;
	struct iwx_tx_data *txdata;
	struct iwx_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint64_t addr;
	int err = 0, i, paylen, off/*, s*/;
	int idx, code, async, group_id;
	size_t hdrlen, datasz;
	uint8_t *data;
	/* Snapshot generation to detect a device reset while we sleep. */
	int generation = sc->sc_generation;
	bus_dma_segment_t seg[10];
	int nsegs;

	code = hcmd->id;
	async = hcmd->flags & IWX_CMD_ASYNC;
	idx = ring->cur;

	/* Total payload is the sum of all fragment lengths. */
	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* If this command waits for a response, allocate response buffer. */
	hcmd->resp_pkt = NULL;
	if (hcmd->flags & IWX_CMD_WANT_RESP) {
		uint8_t *resp_buf;
		KASSERT(!async, ("async command want response"));
		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
		    ("wrong pkt len 1"));
		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
		    ("wrong pkt len 2"));
		/* Slot still owns an earlier response: ring is busy. */
		if (sc->sc_cmd_resp_pkt[idx] != NULL)
			return ENOSPC;
		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (resp_buf == NULL)
			return ENOMEM;
		sc->sc_cmd_resp_pkt[idx] = resp_buf;
		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
	} else {
		sc->sc_cmd_resp_pkt[idx] = NULL;
	}

	desc = &ring->desc[idx];
	txdata = &ring->data[idx];

	/*
	 * XXX Intel inside (tm)
	 * Firmware API versions >= 50 reject old-style commands in
	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
	 * that such commands were in the LONG_GROUP instead in order
	 * for firmware to accept them.
	 */
	if (iwx_cmd_groupid(code) == 0) {
		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
	} else
		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;

	group_id = iwx_cmd_groupid(code);

	hdrlen = sizeof(cmd->hdr_wide);
	datasz = sizeof(cmd->data_wide);

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = EINVAL;
			goto out;
		}
		if (totlen > IWX_RBUF_SIZE)
			panic("totlen > IWX_RBUF_SIZE");
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%i bytes)\n",
			    DEVNAME(sc), IWX_RBUF_SIZE);
			err = ENOMEM;
			goto out;
		}
		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
		    seg, &nsegs, BUS_DMA_NOWAIT);
		/*
		 * NOTE(review): 'seg' has only 10 entries but this checks
		 * against 20, and 'nsegs' is inspected before 'err' — if the
		 * DMA map can produce more than 10 segments this overruns
		 * 'seg'.  In practice the single-cluster mbuf maps to one
		 * segment; confirm against the tag's nsegments limit.
		 */
		if (nsegs > 20)
			panic("nsegs > 20");
		DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
		if (err) {
			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
		cmd = mtod(m, struct iwx_device_cmd *);
		paddr = seg[0].ds_addr;
	} else {
		/* Small command: use the ring's pre-allocated DMA buffer. */
		cmd = &ring->cmd[idx];
		paddr = txdata->cmd_paddr;
	}

	/* Build the wide command header and copy in payload fragments. */
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
	cmd->hdr_wide.group_id = group_id;
	cmd->hdr_wide.qid = ring->qid;
	cmd->hdr_wide.idx = idx;
	cmd->hdr_wide.length = htole16(paylen);
	cmd->hdr_wide.version = iwx_cmd_version(code);
	data = cmd->data_wide;

	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));

	/*
	 * Fill the TFD: the first TB covers up to IWX_FIRST_TB_SIZE bytes;
	 * anything beyond that goes into a second TB at paddr + TB0 size.
	 */
	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
	addr = htole64(paddr);
	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
		DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
		    paylen));
		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
		    IWX_FIRST_TB_SIZE);
		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
		desc->num_tbs = htole16(2);
	} else
		desc->num_tbs = htole16(1);

	/* Make command and descriptor visible to the device. */
	if (paylen > datasz) {
		bus_dmamap_sync(ring->data_dmat, txdata->map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Kick command ring. */
	ring->queued++;
	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
	DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);

	if (!async) {
		/* iwx_cmd_done() wakes us up on the descriptor address. */
		err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
				goto out;
			}

			/* Response buffer will be freed in iwx_free_resp(). */
			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
			sc->sc_cmd_resp_pkt[idx] = NULL;
		} else if (generation == sc->sc_generation) {
			/* Timed out or interrupted: reclaim the response buf. */
			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
			sc->sc_cmd_resp_pkt[idx] = NULL;
		}
	}
out:
	return err;
}
5344
5345 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5346 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5347 uint16_t len, const void *data)
5348 {
5349 struct iwx_host_cmd cmd = {
5350 .id = id,
5351 .len = { len, },
5352 .data = { data, },
5353 .flags = flags,
5354 };
5355
5356 return iwx_send_cmd(sc, &cmd);
5357 }
5358
5359 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5360 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5361 uint32_t *status)
5362 {
5363 struct iwx_rx_packet *pkt;
5364 struct iwx_cmd_response *resp;
5365 int err, resp_len;
5366
5367 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5368 cmd->flags |= IWX_CMD_WANT_RESP;
5369 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5370
5371 err = iwx_send_cmd(sc, cmd);
5372 if (err)
5373 return err;
5374
5375 pkt = cmd->resp_pkt;
5376 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5377 return EIO;
5378
5379 resp_len = iwx_rx_packet_payload_len(pkt);
5380 if (resp_len != sizeof(*resp)) {
5381 iwx_free_resp(sc, cmd);
5382 return EIO;
5383 }
5384
5385 resp = (void *)pkt->data;
5386 *status = le32toh(resp->status);
5387 iwx_free_resp(sc, cmd);
5388 return err;
5389 }
5390
5391 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5392 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5393 const void *data, uint32_t *status)
5394 {
5395 struct iwx_host_cmd cmd = {
5396 .id = id,
5397 .len = { len, },
5398 .data = { data, },
5399 };
5400
5401 return iwx_send_cmd_status(sc, &cmd, status);
5402 }
5403
static void
iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	/*
	 * Release the response buffer of a completed IWX_CMD_WANT_RESP
	 * command.  Callers must have requested a response.
	 */
	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
	    ("hcmd flags !IWX_CMD_WANT_RESP"));
	free(hcmd->resp_pkt, M_DEVBUF);
	hcmd->resp_pkt = NULL;
}
5412
static void
iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
{
	/*
	 * Handle firmware acknowledgement of a host command: release the
	 * command's DMA resources (if it used an external mbuf) and wake
	 * the thread sleeping in iwx_send_cmd() on this descriptor.
	 */
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tx_data *data;

	if (qid != IWX_DQA_CMD_QUEUE) {
		return; /* Not a command ack. */
	}

	data = &ring->data[idx];

	/* Large commands were loaded from an mbuf; free it now. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	wakeup(&ring->desc[idx]);

	DPRINTF(("%s: command 0x%x done\n", __func__, code));
	if (ring->queued == 0) {
		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
		    DEVNAME(sc), code));
	} else if (ring->queued > 0)
		ring->queued--;
}
5441
5442 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5443 iwx_fw_rateidx_ofdm(uint8_t rval)
5444 {
5445 /* Firmware expects indices which match our 11a rate set. */
5446 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5447 int i;
5448
5449 for (i = 0; i < rs->rs_nrates; i++) {
5450 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5451 return i;
5452 }
5453
5454 return 0;
5455 }
5456
5457 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5458 iwx_fw_rateidx_cck(uint8_t rval)
5459 {
5460 /* Firmware expects indices which match our 11b rate set. */
5461 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5462 int i;
5463
5464 for (i = 0; i < rs->rs_nrates; i++) {
5465 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5466 return i;
5467 }
5468
5469 return 0;
5470 }
5471
5472 static int
iwx_min_basic_rate(struct ieee80211com * ic)5473 iwx_min_basic_rate(struct ieee80211com *ic)
5474 {
5475 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5476 struct ieee80211_node *ni = vap->iv_bss;
5477 struct ieee80211_rateset *rs = &ni->ni_rates;
5478 struct ieee80211_channel *c = ni->ni_chan;
5479 int i, min, rval;
5480
5481 min = -1;
5482
5483 if (c == IEEE80211_CHAN_ANYC) {
5484 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5485 return -1;
5486 }
5487
5488 for (i = 0; i < rs->rs_nrates; i++) {
5489 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5490 continue;
5491 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5492 if (min == -1)
5493 min = rval;
5494 else if (rval < min)
5495 min = rval;
5496 }
5497
5498 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5499 if (min == -1)
5500 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5501
5502 return min;
5503 }
5504
5505 /*
5506 * Determine the Tx command flags and Tx rate+flags to use.
5507 * Return the selected Tx rate.
5508 */
5509 static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc * sc,struct iwx_node * in,struct ieee80211_frame * wh,uint16_t * flags,uint32_t * rate_n_flags,struct mbuf * m)5510 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5511 struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5512 struct mbuf *m)
5513 {
5514 struct ieee80211com *ic = &sc->sc_ic;
5515 struct ieee80211_node *ni = &in->in_ni;
5516 struct ieee80211_rateset *rs = &ni->ni_rates;
5517 const struct iwx_rate *rinfo = NULL;
5518 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5519 int ridx = iwx_min_basic_rate(ic);
5520 int min_ridx, rate_flags;
5521 uint8_t rval;
5522
5523 /* We're in the process of clearing the node, no channel already */
5524 if (ridx == -1)
5525 return NULL;
5526
5527 min_ridx = iwx_rval2ridx(ridx);
5528
5529 *flags = 0;
5530
5531 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5532 type != IEEE80211_FC0_TYPE_DATA) {
5533 /* for non-data, use the lowest supported rate */
5534 ridx = min_ridx;
5535 *flags |= IWX_TX_FLAGS_CMD_RATE;
5536 } else if (ni->ni_flags & IEEE80211_NODE_VHT) {
5537 /* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
5538 ridx = iwx_min_basic_rate(ic);
5539 } else if (ni->ni_flags & IEEE80211_NODE_HT) {
5540 ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5541 & ~IEEE80211_RATE_MCS];
5542 } else {
5543 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5544 & IEEE80211_RATE_VAL);
5545 ridx = iwx_rval2ridx(rval);
5546 if (ridx < min_ridx)
5547 ridx = min_ridx;
5548 }
5549
5550 if (m->m_flags & M_EAPOL)
5551 *flags |= IWX_TX_FLAGS_HIGH_PRI;
5552
5553 rinfo = &iwx_rates[ridx];
5554
5555 /*
5556 * Do not fill rate_n_flags if firmware controls the Tx rate.
5557 * For data frames we rely on Tx rate scaling in firmware by default.
5558 */
5559 if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5560 *rate_n_flags = 0;
5561 return rinfo;
5562 }
5563
5564 /*
5565 * Forcing a CCK/OFDM legacy rate is important for management frames.
5566 * Association will only succeed if we do this correctly.
5567 */
5568
5569 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5570 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5571 rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5572 if (IWX_RIDX_IS_CCK(ridx)) {
5573 if (sc->sc_rate_n_flags_version >= 2)
5574 rate_flags |= IWX_RATE_MCS_CCK_MSK;
5575 else
5576 rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5577 } else if (sc->sc_rate_n_flags_version >= 2)
5578 rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5579
5580 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5581 & IEEE80211_RATE_VAL);
5582 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5583 rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5584
5585 if (sc->sc_rate_n_flags_version >= 2) {
5586 if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5587 rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5588 IWX_RATE_LEGACY_RATE_MSK);
5589 } else {
5590 rate_flags |= (iwx_fw_rateidx_cck(rval) &
5591 IWX_RATE_LEGACY_RATE_MSK);
5592 }
5593 } else
5594 rate_flags |= rinfo->plcp;
5595
5596 *rate_n_flags = rate_flags;
5597 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5598 __func__, __LINE__,*flags);
5599 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5600 __func__, __LINE__, *rate_n_flags);
5601
5602 if (sc->sc_debug & IWX_DEBUG_TXRATE)
5603 print_ratenflags(__func__, __LINE__,
5604 *rate_n_flags, sc->sc_rate_n_flags_version);
5605
5606 return rinfo;
5607 }
5608
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	/*
	 * Update the byte-count table entry the hardware scheduler uses
	 * for TFD 'idx': the frame length plus the number of 64-byte
	 * chunks of the TFD to fetch into SRAM.
	 */
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
	    num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Flush the update so the scheduler sees it before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
5644
5645 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5646 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5647 {
5648 struct ieee80211com *ic = &sc->sc_ic;
5649 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5650 struct iwx_node *in = (void *)ni;
5651 struct iwx_tx_ring *ring;
5652 struct iwx_tx_data *data;
5653 struct iwx_tfh_tfd *desc;
5654 struct iwx_device_cmd *cmd;
5655 struct ieee80211_frame *wh;
5656 struct ieee80211_key *k = NULL;
5657 const struct iwx_rate *rinfo;
5658 uint64_t paddr;
5659 u_int hdrlen;
5660 uint32_t rate_n_flags;
5661 uint16_t num_tbs, flags, offload_assist = 0;
5662 int i, totlen, err, pad, qid;
5663 #define IWM_MAX_SCATTER 20
5664 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5665 int nsegs;
5666 struct mbuf *m1;
5667 size_t txcmd_size;
5668
5669 IWX_ASSERT_LOCKED(sc);
5670
5671 wh = mtod(m, struct ieee80211_frame *);
5672 hdrlen = ieee80211_anyhdrsize(wh);
5673
5674 qid = sc->first_data_qid;
5675
5676 /* Put QoS frames on the data queue which maps to their TID. */
5677 if (IEEE80211_QOS_HAS_SEQ(wh)) {
5678 uint16_t qos = ieee80211_gettid(wh);
5679 uint8_t tid = qos & IEEE80211_QOS_TID;
5680 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
5681
5682 /*
5683 * Note: we're currently putting all frames into one queue
5684 * except for A-MPDU queues. We should be able to choose
5685 * other WME queues but first we need to verify they've been
5686 * correctly setup for data.
5687 */
5688
5689 /*
5690 * Only QoS data goes into an A-MPDU queue;
5691 * don't add QoS null, the other data types, etc.
5692 */
5693 if (IEEE80211_AMPDU_RUNNING(tap) &&
5694 IEEE80211_IS_QOSDATA(wh) &&
5695 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5696 sc->aggqid[tid] != 0) {
5697 qid = sc->aggqid[tid];
5698 }
5699 }
5700
5701 ring = &sc->txq[qid];
5702 desc = &ring->desc[ring->cur];
5703 memset(desc, 0, sizeof(*desc));
5704 data = &ring->data[ring->cur];
5705
5706 cmd = &ring->cmd[ring->cur];
5707 cmd->hdr.code = IWX_TX_CMD;
5708 cmd->hdr.flags = 0;
5709 cmd->hdr.qid = ring->qid;
5710 cmd->hdr.idx = ring->cur;
5711
5712 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5713 if (rinfo == NULL)
5714 return EINVAL;
5715
5716 /* Offloaded sequence number assignment; non-AMPDU case */
5717 if ((m->m_flags & M_AMPDU_MPDU) == 0)
5718 ieee80211_output_seqno_assign(ni, -1, m);
5719
5720 /* Radiotap */
5721 if (ieee80211_radiotap_active_vap(vap)) {
5722 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5723
5724 tap->wt_flags = 0;
5725 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5726 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5727 tap->wt_rate = rinfo->rate;
5728 if (k != NULL)
5729 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5730 ieee80211_radiotap_tx(vap, m);
5731 }
5732
5733 /* Encrypt - CCMP via direct HW path, TKIP/WEP indirected openbsd-style for now */
5734 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5735 k = ieee80211_crypto_get_txkey(ni, m);
5736 if (k == NULL) {
5737 printf("%s: k is NULL!\n", __func__);
5738 m_freem(m);
5739 return (ENOBUFS);
5740 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5741 k->wk_keytsc++;
5742 } else {
5743 k->wk_cipher->ic_encap(k, m);
5744
5745 /* 802.11 headers may have moved */
5746 wh = mtod(m, struct ieee80211_frame *);
5747 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5748 }
5749 } else
5750 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5751
5752 totlen = m->m_pkthdr.len;
5753
5754 if (hdrlen & 3) {
5755 /* First segment length must be a multiple of 4. */
5756 pad = 4 - (hdrlen & 3);
5757 offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5758 } else
5759 pad = 0;
5760
5761 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5762 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5763 memset(tx, 0, sizeof(*tx));
5764 tx->len = htole16(totlen);
5765 tx->offload_assist = htole32(offload_assist);
5766 tx->flags = htole16(flags);
5767 tx->rate_n_flags = htole32(rate_n_flags);
5768 memcpy(tx->hdr, wh, hdrlen);
5769 txcmd_size = sizeof(*tx);
5770 } else {
5771 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5772 memset(tx, 0, sizeof(*tx));
5773 tx->len = htole16(totlen);
5774 tx->offload_assist = htole16(offload_assist);
5775 tx->flags = htole32(flags);
5776 tx->rate_n_flags = htole32(rate_n_flags);
5777 memcpy(tx->hdr, wh, hdrlen);
5778 txcmd_size = sizeof(*tx);
5779 }
5780
5781 /* Trim 802.11 header. */
5782 m_adj(m, hdrlen);
5783
5784 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5785 &nsegs, BUS_DMA_NOWAIT);
5786 if (err && err != EFBIG) {
5787 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5788 m_freem(m);
5789 return err;
5790 }
5791 if (err) {
5792 /* Too many DMA segments, linearize mbuf. */
5793 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5794 if (m1 == NULL) {
5795 printf("%s: could not defrag mbufs\n", __func__);
5796 m_freem(m);
5797 return (ENOBUFS);
5798 }
5799 m = m1;
5800 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5801 segs, &nsegs, BUS_DMA_NOWAIT);
5802 if (err) {
5803 printf("%s: can't map mbuf (error %d)\n", __func__,
5804 err);
5805 m_freem(m);
5806 return (err);
5807 }
5808 }
5809 data->m = m;
5810 data->in = in;
5811
5812 /* Fill TX descriptor. */
5813 num_tbs = 2 + nsegs;
5814 desc->num_tbs = htole16(num_tbs);
5815
5816 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5817 paddr = htole64(data->cmd_paddr);
5818 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5819 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5820 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5821 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5822 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5823 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5824 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5825
5826 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5827 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5828
5829 /* Other DMA segments are for data payload. */
5830 for (i = 0; i < nsegs; i++) {
5831 seg = &segs[i];
5832 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5833 paddr = htole64(seg->ds_addr);
5834 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5835 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5836 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5837 }
5838
5839 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5840 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5841 BUS_DMASYNC_PREWRITE);
5842 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5843 BUS_DMASYNC_PREWRITE);
5844
5845 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5846
5847 /* Kick TX ring. */
5848 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5849 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5850 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5851
5852 /* Mark TX ring as full if we reach a certain threshold. */
5853 if (++ring->queued > iwx_himark) {
5854 sc->qfullmsk |= 1 << ring->qid;
5855 }
5856
5857 sc->sc_tx_timer[ring->qid] = 15;
5858
5859 return 0;
5860 }
5861
/*
 * Flush pending frames for the given TIDs of a station by sending the
 * IWX_TXPATH_FLUSH command and, if the firmware reports which queues it
 * flushed, advance the corresponding Tx rings past the flushed frames.
 *
 * Returns 0 on success or an errno value (EIO on a malformed or failed
 * firmware response).
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		/* Synchronous command; we parse the response below. */
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* The response must refer to the station we asked about. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		/* Ignore queue numbers outside our ring array. */
		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		/* Only touch rings whose TID matches the firmware's view. */
		if (tid != txq->tid)
			continue;

		/* Reclaim frames up to the post-flush read pointer. */
		iwx_txq_advance(sc, txq, read_after);
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
5932
5933 #define IWX_FLUSH_WAIT_MS 2000
5934
5935 static int
iwx_drain_sta(struct iwx_softc * sc,struct iwx_node * in,int drain)5936 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5937 {
5938 struct iwx_add_sta_cmd cmd;
5939 int err;
5940 uint32_t status;
5941
5942 memset(&cmd, 0, sizeof(cmd));
5943 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5944 in->in_color));
5945 cmd.sta_id = IWX_STATION_ID;
5946 cmd.add_modify = IWX_STA_MODE_MODIFY;
5947 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5948 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5949
5950 status = IWX_ADD_STA_SUCCESS;
5951 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5952 sizeof(cmd), &cmd, &status);
5953 if (err) {
5954 printf("%s: could not update sta (error %d)\n",
5955 DEVNAME(sc), err);
5956 return err;
5957 }
5958
5959 switch (status & IWX_ADD_STA_STATUS_MASK) {
5960 case IWX_ADD_STA_SUCCESS:
5961 break;
5962 default:
5963 err = EIO;
5964 printf("%s: Couldn't %s draining for station\n",
5965 DEVNAME(sc), drain ? "enable" : "disable");
5966 break;
5967 }
5968
5969 return err;
5970 }
5971
5972 static int
iwx_flush_sta(struct iwx_softc * sc,struct iwx_node * in)5973 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5974 {
5975 int err;
5976
5977 IWX_ASSERT_LOCKED(sc);
5978
5979 sc->sc_flags |= IWX_FLAG_TXFLUSH;
5980
5981 err = iwx_drain_sta(sc, in, 1);
5982 if (err)
5983 goto done;
5984
5985 err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5986 if (err) {
5987 printf("%s: could not flush Tx path (error %d)\n",
5988 DEVNAME(sc), err);
5989 goto done;
5990 }
5991
5992 /*
5993 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the
5994 * fc drive rand has has been replaced in OpenBSD.
5995 */
5996
5997 err = iwx_drain_sta(sc, in, 0);
5998 done:
5999 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6000 return err;
6001 }
6002
6003 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
6004
6005 static int
iwx_beacon_filter_send_cmd(struct iwx_softc * sc,struct iwx_beacon_filter_cmd * cmd)6006 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6007 struct iwx_beacon_filter_cmd *cmd)
6008 {
6009 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6010 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6011 }
6012
6013 static int
iwx_update_beacon_abort(struct iwx_softc * sc,struct iwx_node * in,int enable)6014 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6015 {
6016 struct iwx_beacon_filter_cmd cmd = {
6017 IWX_BF_CMD_CONFIG_DEFAULTS,
6018 .bf_enable_beacon_filter = htole32(1),
6019 .ba_enable_beacon_abort = htole32(enable),
6020 };
6021
6022 if (!sc->sc_bf.bf_enabled)
6023 return 0;
6024
6025 sc->sc_bf.ba_enabled = enable;
6026 return iwx_beacon_filter_send_cmd(sc, &cmd);
6027 }
6028
6029 static void
iwx_power_build_cmd(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_power_cmd * cmd)6030 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6031 struct iwx_mac_power_cmd *cmd)
6032 {
6033 struct ieee80211com *ic = &sc->sc_ic;
6034 struct ieee80211_node *ni = &in->in_ni;
6035 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6036 int dtim_period, dtim_msec, keep_alive;
6037
6038 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6039 in->in_color));
6040 if (vap->iv_dtim_period)
6041 dtim_period = vap->iv_dtim_period;
6042 else
6043 dtim_period = 1;
6044
6045 /*
6046 * Regardless of power management state the driver must set
6047 * keep alive period. FW will use it for sending keep alive NDPs
6048 * immediately after association. Check that keep alive period
6049 * is at least 3 * DTIM.
6050 */
6051 dtim_msec = dtim_period * ni->ni_intval;
6052 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6053 keep_alive = roundup(keep_alive, 1000) / 1000;
6054 cmd->keep_alive_seconds = htole16(keep_alive);
6055
6056 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6057 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6058 }
6059
6060 static int
iwx_power_mac_update_mode(struct iwx_softc * sc,struct iwx_node * in)6061 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6062 {
6063 int err;
6064 int ba_enable;
6065 struct iwx_mac_power_cmd cmd;
6066
6067 memset(&cmd, 0, sizeof(cmd));
6068
6069 iwx_power_build_cmd(sc, in, &cmd);
6070
6071 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6072 sizeof(cmd), &cmd);
6073 if (err != 0)
6074 return err;
6075
6076 ba_enable = !!(cmd.flags &
6077 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6078 return iwx_update_beacon_abort(sc, in, ba_enable);
6079 }
6080
6081 static int
iwx_power_update_device(struct iwx_softc * sc)6082 iwx_power_update_device(struct iwx_softc *sc)
6083 {
6084 struct iwx_device_power_cmd cmd = { };
6085 struct ieee80211com *ic = &sc->sc_ic;
6086
6087 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6088 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6089
6090 return iwx_send_cmd_pdu(sc,
6091 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6092 }
6093 #if 0
/*
 * Enable beacon filtering in the firmware and record the new state in
 * sc->sc_bf on success.  Currently compiled out (#if 0); kept for parity
 * with the disable path below.
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		/* Preserve the current beacon-abort setting. */
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
6110 #endif
6111 static int
iwx_disable_beacon_filter(struct iwx_softc * sc)6112 iwx_disable_beacon_filter(struct iwx_softc *sc)
6113 {
6114 struct iwx_beacon_filter_cmd cmd;
6115 int err;
6116
6117 memset(&cmd, 0, sizeof(cmd));
6118
6119 err = iwx_beacon_filter_send_cmd(sc, &cmd);
6120 if (err == 0)
6121 sc->sc_bf.bf_enabled = 0;
6122
6123 return err;
6124 }
6125
/*
 * Add our station to the firmware (update == 0) or modify an existing
 * entry (update != 0).  Derives the HT/VHT-related station flags
 * (MIMO, channel width, maximum A-MPDU size, MPDU density) from the
 * node's capabilities.  Returns 0 on success or an errno value.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	/* Largest A-MPDU size exponent the firmware accepts (64K). */
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* Monitor mode uses a dedicated general-purpose station slot. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only set when first adding the station. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		/* Advertise 2-stream MIMO if both ends support it. */
		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				/* MCS 8-15 imply a second spatial stream. */
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Use the peer's advertised HT A-MPDU size. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		/* Clamp to what the firmware supports. */
		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Translate the peer's minimum MPDU spacing requirement. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6246
6247 static int
iwx_rm_sta_cmd(struct iwx_softc * sc,struct iwx_node * in)6248 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6249 {
6250 struct ieee80211com *ic = &sc->sc_ic;
6251 struct iwx_rm_sta_cmd rm_sta_cmd;
6252 int err;
6253
6254 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6255 panic("sta already removed");
6256
6257 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6258 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6259 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6260 else
6261 rm_sta_cmd.sta_id = IWX_STATION_ID;
6262
6263 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6264 &rm_sta_cmd);
6265
6266 return err;
6267 }
6268
/*
 * Tear down our station: flush its Tx path, disable its Tx queues
 * (required by newer firmware before station removal), remove the
 * firmware station entry, and reset all related driver-side
 * aggregation state.  Returns 0 on success or an errno value.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Disable every aggregation queue that is still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all driver-side BlockAck/aggregation bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6341
6342 static uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc * sc,struct iwx_scan_channel_cfg_umac * chan,size_t chan_nitems,int n_ssids,uint32_t channel_cfg_flags)6343 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6344 struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6345 int n_ssids, uint32_t channel_cfg_flags)
6346 {
6347 struct ieee80211com *ic = &sc->sc_ic;
6348 struct ieee80211_scan_state *ss = ic->ic_scan;
6349 struct ieee80211_channel *c;
6350 uint8_t nchan;
6351 int j;
6352
6353 for (nchan = j = 0;
6354 j < ss->ss_last &&
6355 nchan < sc->sc_capa_n_scan_channels;
6356 j++) {
6357 uint8_t channel_num;
6358
6359 c = ss->ss_chans[j];
6360 channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6361 if (isset(sc->sc_ucode_api,
6362 IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6363 chan->v2.channel_num = channel_num;
6364 if (IEEE80211_IS_CHAN_2GHZ(c))
6365 chan->v2.band = IWX_PHY_BAND_24;
6366 else
6367 chan->v2.band = IWX_PHY_BAND_5;
6368 chan->v2.iter_count = 1;
6369 chan->v2.iter_interval = 0;
6370 } else {
6371 chan->v1.channel_num = channel_num;
6372 chan->v1.iter_count = 1;
6373 chan->v1.iter_interval = htole16(0);
6374 }
6375 chan->flags |= htole32(channel_cfg_flags);
6376 chan++;
6377 nchan++;
6378 }
6379
6380 return nchan;
6381 }
6382
6383 static int
iwx_fill_probe_req(struct iwx_softc * sc,struct iwx_scan_probe_req * preq)6384 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6385 {
6386 struct ieee80211com *ic = &sc->sc_ic;
6387 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6388 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6389 struct ieee80211_rateset *rs;
6390 size_t remain = sizeof(preq->buf);
6391 uint8_t *frm, *pos;
6392
6393 memset(preq, 0, sizeof(*preq));
6394
6395 if (remain < sizeof(*wh) + 2)
6396 return ENOBUFS;
6397
6398 /*
6399 * Build a probe request frame. Most of the following code is a
6400 * copy & paste of what is done in net80211.
6401 */
6402 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6403 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6404 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6405 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6406 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6407 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6408 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6409 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6410
6411 frm = (uint8_t *)(wh + 1);
6412 *frm++ = IEEE80211_ELEMID_SSID;
6413 *frm++ = 0;
6414 /* hardware inserts SSID */
6415
6416 /* Tell the firmware where the MAC header is. */
6417 preq->mac_header.offset = 0;
6418 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6419 remain -= frm - (uint8_t *)wh;
6420
6421 /* Fill in 2GHz IEs and tell firmware where they are. */
6422 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6423 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6424 if (remain < 4 + rs->rs_nrates)
6425 return ENOBUFS;
6426 } else if (remain < 2 + rs->rs_nrates)
6427 return ENOBUFS;
6428 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6429 pos = frm;
6430 frm = ieee80211_add_rates(frm, rs);
6431 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6432 frm = ieee80211_add_xrates(frm, rs);
6433 remain -= frm - pos;
6434
6435 if (isset(sc->sc_enabled_capa,
6436 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6437 if (remain < 3)
6438 return ENOBUFS;
6439 *frm++ = IEEE80211_ELEMID_DSPARMS;
6440 *frm++ = 1;
6441 *frm++ = 0;
6442 remain -= 3;
6443 }
6444 preq->band_data[0].len = htole16(frm - pos);
6445
6446 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6447 /* Fill in 5GHz IEs. */
6448 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6449 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6450 if (remain < 4 + rs->rs_nrates)
6451 return ENOBUFS;
6452 } else if (remain < 2 + rs->rs_nrates)
6453 return ENOBUFS;
6454 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6455 pos = frm;
6456 frm = ieee80211_add_rates(frm, rs);
6457 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6458 frm = ieee80211_add_xrates(frm, rs);
6459 preq->band_data[1].len = htole16(frm - pos);
6460 remain -= frm - pos;
6461 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6462 if (remain < 14)
6463 return ENOBUFS;
6464 frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6465 remain -= frm - pos;
6466 preq->band_data[1].len = htole16(frm - pos);
6467 }
6468 }
6469
6470 /* Send 11n IEs on both 2GHz and 5GHz bands. */
6471 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6472 pos = frm;
6473 if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6474 if (remain < 28)
6475 return ENOBUFS;
6476 frm = ieee80211_add_htcap(frm, vap->iv_bss);
6477 /* XXX add WME info? */
6478 remain -= frm - pos;
6479 }
6480
6481 preq->common_data.len = htole16(frm - pos);
6482
6483 return 0;
6484 }
6485
6486 static int
iwx_config_umac_scan_reduced(struct iwx_softc * sc)6487 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6488 {
6489 struct iwx_scan_config scan_cfg;
6490 struct iwx_host_cmd hcmd = {
6491 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6492 .len[0] = sizeof(scan_cfg),
6493 .data[0] = &scan_cfg,
6494 .flags = 0,
6495 };
6496 int cmdver;
6497
6498 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6499 printf("%s: firmware does not support reduced scan config\n",
6500 DEVNAME(sc));
6501 return ENOTSUP;
6502 }
6503
6504 memset(&scan_cfg, 0, sizeof(scan_cfg));
6505
6506 /*
6507 * SCAN_CFG version >= 5 implies that the broadcast
6508 * STA ID field is deprecated.
6509 */
6510 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6511 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6512 scan_cfg.bcast_sta_id = 0xff;
6513
6514 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6515 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6516
6517 return iwx_send_cmd(sc, &hcmd);
6518 }
6519
6520 static uint16_t
iwx_scan_umac_flags_v2(struct iwx_softc * sc,int bgscan)6521 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6522 {
6523 struct ieee80211com *ic = &sc->sc_ic;
6524 struct ieee80211_scan_state *ss = ic->ic_scan;
6525 uint16_t flags = 0;
6526
6527 if (ss->ss_nssid == 0) {
6528 DPRINTF(("%s: Passive scan started\n", __func__));
6529 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6530 }
6531
6532 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6533 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6534 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6535
6536 return flags;
6537 }
6538
6539 #define IWX_SCAN_DWELL_ACTIVE 10
6540 #define IWX_SCAN_DWELL_PASSIVE 110
6541
6542 /* adaptive dwell max budget time [TU] for full scan */
6543 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6544 /* adaptive dwell max budget time [TU] for directed scan */
6545 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6546 /* adaptive dwell default high band APs number */
6547 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6548 /* adaptive dwell default low band APs number */
6549 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6550 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6551 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6552 /* adaptive dwell number of APs override for p2p friendly GO channels */
6553 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6554 /* adaptive dwell number of APs override for social channels */
6555 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6556
6557 static void
iwx_scan_umac_dwell_v10(struct iwx_softc * sc,struct iwx_scan_general_params_v10 * general_params,int bgscan)6558 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6559 struct iwx_scan_general_params_v10 *general_params, int bgscan)
6560 {
6561 uint32_t suspend_time, max_out_time;
6562 uint8_t active_dwell, passive_dwell;
6563
6564 active_dwell = IWX_SCAN_DWELL_ACTIVE;
6565 passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6566
6567 general_params->adwell_default_social_chn =
6568 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6569 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6570 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6571
6572 if (bgscan)
6573 general_params->adwell_max_budget =
6574 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6575 else
6576 general_params->adwell_max_budget =
6577 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6578
6579 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6580 if (bgscan) {
6581 max_out_time = htole32(120);
6582 suspend_time = htole32(120);
6583 } else {
6584 max_out_time = htole32(0);
6585 suspend_time = htole32(0);
6586 }
6587 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6588 htole32(max_out_time);
6589 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6590 htole32(suspend_time);
6591 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6592 htole32(max_out_time);
6593 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6594 htole32(suspend_time);
6595
6596 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6597 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6598 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6599 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6600 }
6601
6602 static void
iwx_scan_umac_fill_general_p_v10(struct iwx_softc * sc,struct iwx_scan_general_params_v10 * gp,uint16_t gen_flags,int bgscan)6603 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6604 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6605 {
6606 iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6607
6608 gp->flags = htole16(gen_flags);
6609
6610 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6611 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6612 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6613 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6614
6615 gp->scan_start_mac_id = 0;
6616 }
6617
6618 static void
iwx_scan_umac_fill_ch_p_v6(struct iwx_softc * sc,struct iwx_scan_channel_params_v6 * cp,uint32_t channel_cfg_flags,int n_ssid)6619 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6620 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6621 int n_ssid)
6622 {
6623 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6624
6625 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6626 nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6627
6628 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6629 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6630 }
6631
/*
 * Issue a v14 UMAC scan request.  Builds the scan command in the
 * softc-embedded buffer (sc_umac_v14_cmd), fills in general, periodic,
 * probe and channel parameters, and sends it to the firmware.
 * Background scans are sent asynchronously.  Returns 0 on success or
 * an errno value.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* One-shot schedule: a single iteration, no repeat interval. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/* Copy the requested SSIDs for directed (active) scanning. */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6694
6695 static void
iwx_mcc_update(struct iwx_softc * sc,struct iwx_mcc_chub_notif * notif)6696 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6697 {
6698 char alpha2[3];
6699
6700 snprintf(alpha2, sizeof(alpha2), "%c%c",
6701 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6702
6703 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6704 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6705
6706 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6707 }
6708
6709 uint8_t
iwx_ridx2rate(struct ieee80211_rateset * rs,int ridx)6710 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6711 {
6712 int i;
6713 uint8_t rval;
6714
6715 for (i = 0; i < rs->rs_nrates; i++) {
6716 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6717 if (rval == iwx_rates[ridx].rate)
6718 return rs->rs_rates[i];
6719 }
6720
6721 return 0;
6722 }
6723
6724 static int
iwx_rval2ridx(int rval)6725 iwx_rval2ridx(int rval)
6726 {
6727 int ridx;
6728
6729 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6730 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6731 continue;
6732 if (rval == iwx_rates[ridx].rate)
6733 break;
6734 }
6735
6736 return ridx;
6737 }
6738
/*
 * Compute the CCK and OFDM basic ("ACK") rate bitmaps for the BSS of
 * 'in', following the 802.11-2007 9.6 control-response rate rules.
 * Results are returned through *cck_rates and *ofdm_rates in the bit
 * layout consumed by the firmware MAC context command (CCK bits start
 * at IWX_FIRST_CCK_RATE, OFDM bits are rebased to IWX_FIRST_OFDM_RATE).
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only apply on 2GHz (or not-yet-known) channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Collect the basic OFDM rates and remember the lowest one. */
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6824
6825 static void
iwx_mac_ctxt_cmd_common(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_ctx_cmd * cmd,uint32_t action)6826 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6827 struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6828 {
6829 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6830 struct ieee80211com *ic = &sc->sc_ic;
6831 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6832 struct ieee80211_node *ni = vap->iv_bss;
6833 int cck_ack_rates, ofdm_ack_rates;
6834
6835 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6836 in->in_color));
6837 cmd->action = htole32(action);
6838
6839 if (action == IWX_FW_CTXT_ACTION_REMOVE)
6840 return;
6841
6842 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6843 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6844 else if (ic->ic_opmode == IEEE80211_M_STA)
6845 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6846 else
6847 panic("unsupported operating mode %d", ic->ic_opmode);
6848 cmd->tsf_id = htole32(IWX_TSF_ID_A);
6849
6850 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6851 DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6852 ether_sprintf(cmd->node_addr)));
6853 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6854 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6855 return;
6856 }
6857
6858 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6859 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6860 ether_sprintf(cmd->bssid_addr)));
6861 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6862 cmd->cck_rates = htole32(cck_ack_rates);
6863 cmd->ofdm_rates = htole32(ofdm_ack_rates);
6864
6865 cmd->cck_short_preamble
6866 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6867 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6868 cmd->short_slot
6869 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6870 ? IWX_MAC_FLG_SHORT_SLOT : 0);
6871
6872 struct chanAccParams chp;
6873 ieee80211_wme_vap_getparams(vap, &chp);
6874
6875 for (int i = 0; i < WME_NUM_AC; i++) {
6876 int txf = iwx_ac_to_tx_fifo[i];
6877 cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6878 cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6879 cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6880 cmd->ac[txf].fifos_mask = (1 << txf);
6881 cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6882
6883 cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6884 }
6885
6886 if (ni->ni_flags & IEEE80211_NODE_QOS) {
6887 DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6888 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6889 }
6890
6891 if (ni->ni_flags & IEEE80211_NODE_HT) {
6892 switch (vap->iv_curhtprotmode) {
6893 case IEEE80211_HTINFO_OPMODE_PURE:
6894 break;
6895 case IEEE80211_HTINFO_OPMODE_PROTOPT:
6896 case IEEE80211_HTINFO_OPMODE_MIXED:
6897 cmd->protection_flags |=
6898 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6899 IWX_MAC_PROT_FLG_FAT_PROT);
6900 break;
6901 case IEEE80211_HTINFO_OPMODE_HT20PR:
6902 if (in->in_phyctxt &&
6903 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6904 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6905 cmd->protection_flags |=
6906 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6907 IWX_MAC_PROT_FLG_FAT_PROT);
6908 }
6909 break;
6910 default:
6911 break;
6912 }
6913 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6914 DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6915 }
6916
6917 if (ic->ic_flags & IEEE80211_F_USEPROT)
6918 cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6919 cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6920 #undef IWX_EXP2
6921 }
6922
6923 static void
iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_data_sta * sta,int assoc)6924 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6925 struct iwx_mac_data_sta *sta, int assoc)
6926 {
6927 struct ieee80211_node *ni = &in->in_ni;
6928 struct ieee80211com *ic = &sc->sc_ic;
6929 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6930 uint32_t dtim_off;
6931 uint64_t tsf;
6932 int dtim_period;
6933
6934 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
6935 tsf = le64toh(ni->ni_tstamp.tsf);
6936 dtim_period = vap->iv_dtim_period;
6937
6938 sta->is_assoc = htole32(assoc);
6939
6940 if (assoc) {
6941 sta->dtim_time = htole32(tsf + dtim_off);
6942 sta->dtim_tsf = htole64(tsf + dtim_off);
6943 // XXX: unset in iwm
6944 sta->assoc_beacon_arrive_time = 0;
6945 }
6946 sta->bi = htole32(ni->ni_intval);
6947 sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
6948 sta->data_policy = htole32(0);
6949 sta->listen_interval = htole32(10);
6950 sta->assoc_id = htole32(ni->ni_associd);
6951 }
6952
6953 static int
iwx_mac_ctxt_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action,int assoc)6954 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6955 int assoc)
6956 {
6957 struct ieee80211com *ic = &sc->sc_ic;
6958 struct ieee80211_node *ni = &in->in_ni;
6959 struct iwx_mac_ctx_cmd cmd;
6960 int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6961
6962 if (action == IWX_FW_CTXT_ACTION_ADD && active)
6963 panic("MAC already added");
6964 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6965 panic("MAC already removed");
6966
6967 memset(&cmd, 0, sizeof(cmd));
6968
6969 iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6970
6971 if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6972 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6973 sizeof(cmd), &cmd);
6974 }
6975
6976 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6977 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6978 IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6979 IWX_MAC_FILTER_ACCEPT_GRP |
6980 IWX_MAC_FILTER_IN_BEACON |
6981 IWX_MAC_FILTER_IN_PROBE_REQUEST |
6982 IWX_MAC_FILTER_IN_CRC32);
6983 // XXX: dtim period is in vap
6984 } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
6985 /*
6986 * Allow beacons to pass through as long as we are not
6987 * associated or we do not have dtim period information.
6988 */
6989 cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6990 }
6991 iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6992 return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6993 }
6994
6995 static int
iwx_clear_statistics(struct iwx_softc * sc)6996 iwx_clear_statistics(struct iwx_softc *sc)
6997 {
6998 struct iwx_statistics_cmd scmd = {
6999 .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7000 };
7001 struct iwx_host_cmd cmd = {
7002 .id = IWX_STATISTICS_CMD,
7003 .len[0] = sizeof(scmd),
7004 .data[0] = &scmd,
7005 .flags = IWX_CMD_WANT_RESP,
7006 .resp_pkt_len = sizeof(struct iwx_notif_statistics),
7007 };
7008 int err;
7009
7010 err = iwx_send_cmd(sc, &cmd);
7011 if (err)
7012 return err;
7013
7014 iwx_free_resp(sc, &cmd);
7015 return 0;
7016 }
7017
/*
 * Start a foreground UMAC scan.  Returns 0 on success.
 */
static int
iwx_scan(struct iwx_softc *sc)
{
	int err;

	err = iwx_umac_scan_v14(sc, 0);
	if (err != 0)
		printf("%s: could not initiate scan\n", DEVNAME(sc));

	return err;
}
7030
7031 static int
iwx_bgscan(struct ieee80211com * ic)7032 iwx_bgscan(struct ieee80211com *ic)
7033 {
7034 struct iwx_softc *sc = ic->ic_softc;
7035 int err;
7036
7037 err = iwx_umac_scan_v14(sc, 1);
7038 if (err) {
7039 printf("%s: could not initiate scan\n", DEVNAME(sc));
7040 return err;
7041 }
7042 return 0;
7043 }
7044
7045 static int
iwx_enable_mgmt_queue(struct iwx_softc * sc)7046 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7047 {
7048 int err;
7049
7050 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7051
7052 /*
7053 * Non-QoS frames use the "MGMT" TID and queue.
7054 * Other TIDs and data queues are reserved for QoS data frames.
7055 */
7056 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7057 IWX_MGMT_TID, IWX_TX_RING_COUNT);
7058 if (err) {
7059 printf("%s: could not enable Tx queue %d (error %d)\n",
7060 DEVNAME(sc), sc->first_data_qid, err);
7061 return err;
7062 }
7063
7064 return 0;
7065 }
7066
7067 static int
iwx_disable_mgmt_queue(struct iwx_softc * sc)7068 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7069 {
7070 int err, cmd_ver;
7071
7072 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7073 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7074 IWX_SCD_QUEUE_CONFIG_CMD);
7075 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7076 return 0;
7077
7078 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7079
7080 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7081 IWX_MGMT_TID);
7082 if (err) {
7083 printf("%s: could not disable Tx queue %d (error %d)\n",
7084 DEVNAME(sc), sc->first_data_qid, err);
7085 return err;
7086 }
7087
7088 return 0;
7089 }
7090
7091 static int
iwx_rs_rval2idx(uint8_t rval)7092 iwx_rs_rval2idx(uint8_t rval)
7093 {
7094 /* Firmware expects indices which match our 11g rate set. */
7095 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7096 int i;
7097
7098 for (i = 0; i < rs->rs_nrates; i++) {
7099 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7100 return i;
7101 }
7102
7103 return -1;
7104 }
7105
7106 static uint16_t
iwx_rs_ht_rates(struct iwx_softc * sc,struct ieee80211_node * ni,int rsidx)7107 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7108 {
7109 uint16_t htrates = 0;
7110 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7111 int i;
7112
7113 if (rsidx == IEEE80211_HT_RATESET_SISO) {
7114 for (i = 0; i < htrs->rs_nrates; i++) {
7115 if (htrs->rs_rates[i] <= 7)
7116 htrates |= (1 << htrs->rs_rates[i]);
7117 }
7118 } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7119 for (i = 0; i < htrs->rs_nrates; i++) {
7120 if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7121 htrates |= (1 << (htrs->rs_rates[i] - 8));
7122 }
7123 } else
7124 panic(("iwx_rs_ht_rates"));
7125
7126 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7127 "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7128
7129 return htrates;
7130 }
7131
7132 uint16_t
iwx_rs_vht_rates(struct iwx_softc * sc,struct ieee80211_node * ni,int num_ss)7133 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7134 {
7135 uint16_t rx_mcs;
7136 int max_mcs = -1;
7137 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
7138 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
7139 rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7140 IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7141 IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7142
7143 switch (rx_mcs) {
7144 case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7145 break;
7146 case IEEE80211_VHT_MCS_SUPPORT_0_7:
7147 max_mcs = 7;
7148 break;
7149 case IEEE80211_VHT_MCS_SUPPORT_0_8:
7150 max_mcs = 8;
7151 break;
7152 case IEEE80211_VHT_MCS_SUPPORT_0_9:
7153 /* Disable VHT MCS 9 for 20MHz-only stations. */
7154 if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7155 max_mcs = 8;
7156 else
7157 max_mcs = 9;
7158 break;
7159 default:
7160 /* Should not happen; Values above cover the possible range. */
7161 panic("invalid VHT Rx MCS value %u", rx_mcs);
7162 }
7163
7164 return ((1 << (max_mcs + 1)) - 1);
7165 }
7166
/*
 * Configure firmware rate control (TLC) via version 3 of the
 * TLC_MNG_CONFIG command.  The implementation below is compiled out
 * and replaced with a panic because this path has not been validated
 * on this port yet; see the '#if 1' guard.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	/* NOTE(review): v3 firmware path untested here — intentionally fatal. */
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the legacy rate set into firmware rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the highest supported PHY mode and its MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Derive the maximum channel width from the PHY context. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU sizes: 3895 for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7238
/*
 * Configure firmware rate control (TLC) via version 4 of the
 * TLC_MNG_CONFIG command: legacy rate bitmap, HT/VHT MCS bitmaps,
 * channel width, chain mask, A-MSDU limits and SGI support.
 * Returns 0 on success, EINVAL if a legacy rate cannot be mapped.
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Translate the legacy rate set into firmware rate indices. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	/* Debug dump of the negotiated HT rates. */
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the highest supported PHY mode and its MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Derive the maximum channel width from the node's channel. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU sizes: 3895 for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Advertise short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7337
7338 static int
iwx_rs_init(struct iwx_softc * sc,struct iwx_node * in)7339 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7340 {
7341 int cmd_ver;
7342
7343 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7344 IWX_TLC_MNG_CONFIG_CMD);
7345 if (cmd_ver == 4)
7346 return iwx_rs_init_v4(sc, in);
7347 else
7348 return iwx_rs_init_v3(sc, in);
7349 }
7350
7351
7352 /**
7353 * @brief Turn the given TX rate control notification into an ieee80211_node_txrate
7354 *
7355 * This populates the given txrate node with the TX rate control notification.
7356 *
7357 * @param sc driver softc
7358 * @param notif firmware notification
7359 * @param ni ieee80211_node update
7360 * @returns true if updated, false if not
7361 */
7362 static bool
iwx_rs_update_node_txrate(struct iwx_softc * sc,const struct iwx_tlc_update_notif * notif,struct ieee80211_node * ni)7363 iwx_rs_update_node_txrate(struct iwx_softc *sc,
7364 const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
7365 {
7366 struct ieee80211com *ic = &sc->sc_ic;
7367 /* XXX TODO: create an inline function in if_iwxreg.h? */
7368 static int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
7369 static int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };
7370
7371 uint32_t rate_n_flags;
7372 uint32_t type;
7373
7374 /* Extract the rate and command version */
7375 rate_n_flags = le32toh(notif->rate);
7376
7377 if (sc->sc_rate_n_flags_version != 2) {
7378 net80211_ic_printf(ic,
7379 "%s: unsupported rate_n_flags version (%d)\n",
7380 __func__,
7381 sc->sc_rate_n_flags_version);
7382 return (false);
7383 }
7384
7385 if (sc->sc_debug & IWX_DEBUG_TXRATE)
7386 print_ratenflags(__func__, __LINE__,
7387 rate_n_flags, sc->sc_rate_n_flags_version);
7388
7389 type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7390 switch (type) {
7391 case IWX_RATE_MCS_CCK_MSK:
7392 ieee80211_node_set_txrate_dot11rate(ni,
7393 cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
7394 return (true);
7395 case IWX_RATE_MCS_LEGACY_OFDM_MSK:
7396 ieee80211_node_set_txrate_dot11rate(ni,
7397 ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
7398 return (true);
7399 case IWX_RATE_MCS_HT_MSK:
7400 /*
7401 * TODO: the current API doesn't include channel width
7402 * and other flags, so we can't accurately store them yet!
7403 *
7404 * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
7405 * >> IWX_RATE_MCS_CHAN_WIDTH_POS)
7406 * LDPC: (flags & (1 << 16))
7407 */
7408 ieee80211_node_set_txrate_ht_mcsrate(ni,
7409 IWX_RATE_HT_MCS_INDEX(rate_n_flags));
7410 return (true);
7411 case IWX_RATE_MCS_VHT_MSK:
7412 /* TODO: same comment on channel width, etc above */
7413 ieee80211_node_set_txrate_vht_rate(ni,
7414 IWX_RATE_VHT_MCS_CODE(rate_n_flags),
7415 IWX_RATE_VHT_MCS_NSS(rate_n_flags));
7416 return (true);
7417 default:
7418 net80211_ic_printf(ic,
7419 "%s: unsupported chosen rate type in "
7420 "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
7421 type >> IWX_RATE_MCS_MOD_TYPE_POS);
7422 return (false);
7423 }
7424
7425 /* Default: if we get here, we didn't successfully update anything */
7426 return (false);
7427 }
7428
7429 /**
7430 * @brief Process a firmware rate control update and update net80211.
7431 *
7432 * Since firmware is doing rate control, this just needs to update
7433 * the txrate in the ieee80211_node entry.
7434 */
7435 static void
iwx_rs_update(struct iwx_softc * sc,struct iwx_tlc_update_notif * notif)7436 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7437 {
7438 struct ieee80211com *ic = &sc->sc_ic;
7439 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7440 /* XXX TODO: get a node ref! */
7441 struct ieee80211_node *ni = (void *)vap->iv_bss;
7442
7443 /*
7444 * For now the iwx driver only supports a single vdev with a single
7445 * node; it doesn't yet support ibss/hostap/multiple vdevs.
7446 */
7447 if (notif->sta_id != IWX_STATION_ID ||
7448 (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7449 return;
7450
7451 iwx_rs_update_node_txrate(sc, notif, ni);
7452 }
7453
7454 static int
iwx_phy_send_rlc(struct iwx_softc * sc,struct iwx_phy_ctxt * phyctxt,uint8_t chains_static,uint8_t chains_dynamic)7455 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7456 uint8_t chains_static, uint8_t chains_dynamic)
7457 {
7458 struct iwx_rlc_config_cmd cmd;
7459 uint32_t cmd_id;
7460 uint8_t active_cnt, idle_cnt;
7461
7462 memset(&cmd, 0, sizeof(cmd));
7463
7464 idle_cnt = chains_static;
7465 active_cnt = chains_dynamic;
7466
7467 cmd.phy_id = htole32(phyctxt->id);
7468 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7469 IWX_PHY_RX_CHAIN_VALID_POS);
7470 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7471 cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7472 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7473
7474 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7475 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7476 }
7477
/*
 * Update a firmware PHY context to the given channel/width settings.
 * When the firmware supports CDB (dual-band) and the channel changes
 * band, the context must be removed and re-added; otherwise a MODIFY
 * suffices.  On success the cached sco/vht_chan_width are updated and,
 * for firmware with RLC_CONFIG_CMD v2, the Rx chain config is re-sent.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	/* Guard against an unresolved channel; see GOS-3833. */
	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	/* Band change with CDB firmware requires remove + add. */
	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Cache the settings we just programmed. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/* Newer firmware wants the Rx chain config in a separate command. */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7542
/*
 * Bring up firmware state for the AUTH phase: program a PHY context,
 * add the MAC context, binding and station, enable the management Tx
 * queue, and protect the session so the firmware stays on channel
 * during association.  On error, state added so far is torn down in
 * reverse order — but only if no device reset (sc_generation change)
 * happened in the meantime.
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * NOTE(review): this reference does not appear to be released on
	 * the early-error returns below — possible node ref leak; verify
	 * against the callers/teardown path.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	/* Point PHY context 0 at the channel we will operate on. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode uses a dedicated injection queue and is done here. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

	/*
	 * Error unwind: skip teardown if the device was reset meanwhile,
	 * since the firmware state we added no longer exists.
	 */
rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7647
/*
 * Tear down firmware state established by iwx_auth(): cancel session
 * protection, then remove the station, binding and MAC context in that
 * order, guarded by the corresponding sc_flags bits.  Finally park the
 * now-unused PHY context on a default channel.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7699
/*
 * Complete the transition into RUN state: re-tune the PHY context for
 * HT/VHT operation, re-add the station with its final capabilities,
 * mark the MAC context associated, and enable filtering, power saving
 * and rate scaling.  Called with the softc lock held.
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * Now that the AP's capabilities are known, update the PHY
	 * context with the negotiated chain count and channel width.
	 */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Switch smart-fifo handling to associated (full on) timeouts. */
	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Accept multicast traffic from our BSSID. */
	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	/* Per-station (MAC) power-save configuration. */
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode needs no rate-scaling state. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7796
/*
 * Undo RUN state: flush pending Tx frames for the station, stop all
 * Rx block-ack sessions, switch the smart fifo back to its
 * unassociated configuration and mark the MAC context disassociated.
 * Called with the softc lock held when leaving RUN state.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee80211_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		/* Skip slots that never held an active BA session. */
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7852
7853 static struct ieee80211_node *
iwx_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])7854 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7855 {
7856 return malloc(sizeof (struct iwx_node), M_80211_NODE,
7857 M_NOWAIT | M_ZERO);
7858 }
7859
#if 0
/*
 * NOTE(review): hardware crypto key handling inherited from OpenBSD's
 * if_iwx.c.  It uses OpenBSD net80211 interfaces (ieee80211_set_key,
 * k->k_cipher, splnet(), task queues, ...) which do not exist on
 * FreeBSD; kept disabled for reference until it is ported.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	return EBUSY;
}

int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}

void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}

void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
#endif
8012
/*
 * Worker for net80211 state transitions, called with the net80211 lock
 * dropped (see iwx_newstate()).  Downward transitions tear firmware
 * state down in reverse order of setup: RUN teardown first, then
 * deauth when dropping to AUTH or below.
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	/* Tear down on any downward (or beyond-RUN) transition. */
	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			/* Keep auth state for AUTH->AUTH (band-steering). */
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/*
	 * Set up the target state.  SCAN/ASSOC/INIT require no firmware
	 * interaction here; AUTH and RUN drive the firmware directly.
	 */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8078
8079 static int
iwx_newstate(struct ieee80211vap * vap,enum ieee80211_state nstate,int arg)8080 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
8081 {
8082 struct iwx_vap *ivp = IWX_VAP(vap);
8083 struct ieee80211com *ic = vap->iv_ic;
8084 enum ieee80211_state ostate = vap->iv_state;
8085 int err;
8086
8087 /*
8088 * Prevent attempts to transition towards the same state, unless
8089 * we are scanning in which case a SCAN -> SCAN transition
8090 * triggers another scan iteration. And AUTH -> AUTH is needed
8091 * to support band-steering.
8092 */
8093 if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
8094 nstate != IEEE80211_S_AUTH)
8095 return 0;
8096 IEEE80211_UNLOCK(ic);
8097 err = iwx_newstate_sub(vap, nstate);
8098 IEEE80211_LOCK(ic);
8099 if (err == 0)
8100 err = ivp->iv_newstate(vap, nstate, arg);
8101
8102 return (err);
8103 }
8104
8105 static void
iwx_endscan(struct iwx_softc * sc)8106 iwx_endscan(struct iwx_softc *sc)
8107 {
8108 struct ieee80211com *ic = &sc->sc_ic;
8109 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8110
8111 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8112 return;
8113
8114 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8115
8116 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8117 wakeup(&vap->iv_state); /* wake up iwx_newstate */
8118 }
8119
8120 /*
8121 * Aging and idle timeouts for the different possible scenarios
8122 * in default configuration
8123 */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-* (retry/reorder) */
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
8147
8148 /*
8149 * Aging and idle timeouts for the different possible scenarios
8150 * in single BSS MAC configuration.
8151 */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-* (retry/reorder) */
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};
8175
8176 static void
iwx_fill_sf_command(struct iwx_softc * sc,struct iwx_sf_cfg_cmd * sf_cmd,struct ieee80211_node * ni)8177 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8178 struct ieee80211_node *ni)
8179 {
8180 int i, j, watermark;
8181
8182 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8183
8184 /*
8185 * If we are in association flow - check antenna configuration
8186 * capabilities of the AP station, and choose the watermark accordingly.
8187 */
8188 if (ni) {
8189 if (ni->ni_flags & IEEE80211_NODE_HT) {
8190 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8191 int hasmimo = 0;
8192 for (i = 0; i < htrs->rs_nrates; i++) {
8193 if (htrs->rs_rates[i] > 7) {
8194 hasmimo = 1;
8195 break;
8196 }
8197 }
8198 if (hasmimo)
8199 watermark = IWX_SF_W_MARK_MIMO2;
8200 else
8201 watermark = IWX_SF_W_MARK_SISO;
8202 } else {
8203 watermark = IWX_SF_W_MARK_LEGACY;
8204 }
8205 /* default watermark value for unassociated mode. */
8206 } else {
8207 watermark = IWX_SF_W_MARK_MIMO2;
8208 }
8209 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8210
8211 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8212 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8213 sf_cmd->long_delay_timeouts[i][j] =
8214 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8215 }
8216 }
8217
8218 if (ni) {
8219 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8220 sizeof(iwx_sf_full_timeout));
8221 } else {
8222 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8223 sizeof(iwx_sf_full_timeout_def));
8224 }
8225
8226 }
8227
8228 static int
iwx_sf_config(struct iwx_softc * sc,int new_state)8229 iwx_sf_config(struct iwx_softc *sc, int new_state)
8230 {
8231 struct ieee80211com *ic = &sc->sc_ic;
8232 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8233 struct ieee80211_node *ni = vap->iv_bss;
8234 struct iwx_sf_cfg_cmd sf_cmd = {
8235 .state = htole32(new_state),
8236 };
8237 int err = 0;
8238
8239 switch (new_state) {
8240 case IWX_SF_UNINIT:
8241 case IWX_SF_INIT_OFF:
8242 iwx_fill_sf_command(sc, &sf_cmd, NULL);
8243 break;
8244 case IWX_SF_FULL_ON:
8245 iwx_fill_sf_command(sc, &sf_cmd, ni);
8246 break;
8247 default:
8248 return EINVAL;
8249 }
8250
8251 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8252 sizeof(sf_cmd), &sf_cmd);
8253 return err;
8254 }
8255
8256 static int
iwx_send_bt_init_conf(struct iwx_softc * sc)8257 iwx_send_bt_init_conf(struct iwx_softc *sc)
8258 {
8259 struct iwx_bt_coex_cmd bt_cmd;
8260
8261 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8262
8263 bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8264 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8265 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8266
8267
8268 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8269 &bt_cmd);
8270 }
8271
/*
 * Tell the firmware about SoC properties: crystal latency and, for
 * integrated devices, the LTR delay and low-latency-crystal flags.
 */
static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask. Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		/* Integrated parts may need an LTR apply delay. */
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/*
		 * The low-latency flag is only understood by firmware
		 * that also speaks UMAC scan command version >= 2.
		 */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
8310
8311 static int
iwx_send_update_mcc_cmd(struct iwx_softc * sc,const char * alpha2)8312 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8313 {
8314 struct iwx_mcc_update_cmd mcc_cmd;
8315 struct iwx_host_cmd hcmd = {
8316 .id = IWX_MCC_UPDATE_CMD,
8317 .flags = IWX_CMD_WANT_RESP,
8318 .data = { &mcc_cmd },
8319 };
8320 struct iwx_rx_packet *pkt;
8321 struct iwx_mcc_update_resp *resp;
8322 size_t resp_len;
8323 int err;
8324
8325 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8326 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8327 if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8328 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8329 mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8330 else
8331 mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8332
8333 hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8334 hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8335
8336 err = iwx_send_cmd(sc, &hcmd);
8337 if (err)
8338 return err;
8339
8340 pkt = hcmd.resp_pkt;
8341 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8342 err = EIO;
8343 goto out;
8344 }
8345
8346 resp_len = iwx_rx_packet_payload_len(pkt);
8347 if (resp_len < sizeof(*resp)) {
8348 err = EIO;
8349 goto out;
8350 }
8351
8352 resp = (void *)pkt->data;
8353 if (resp_len != sizeof(*resp) +
8354 resp->n_channels * sizeof(resp->channels[0])) {
8355 err = EIO;
8356 goto out;
8357 }
8358
8359 DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
8360 resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8361
8362 out:
8363 iwx_free_resp(sc, &hcmd);
8364
8365 return err;
8366 }
8367
8368 static int
iwx_send_temp_report_ths_cmd(struct iwx_softc * sc)8369 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8370 {
8371 struct iwx_temp_report_ths_cmd cmd;
8372 int err;
8373
8374 /*
8375 * In order to give responsibility for critical-temperature-kill
8376 * and TX backoff to FW we need to send an empty temperature
8377 * reporting command at init time.
8378 */
8379 memset(&cmd, 0, sizeof(cmd));
8380
8381 err = iwx_send_cmd_pdu(sc,
8382 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8383 0, sizeof(cmd), &cmd);
8384 if (err)
8385 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8386 DEVNAME(sc), err);
8387
8388 return err;
8389 }
8390
8391 static int
iwx_init_hw(struct iwx_softc * sc)8392 iwx_init_hw(struct iwx_softc *sc)
8393 {
8394 struct ieee80211com *ic = &sc->sc_ic;
8395 int err = 0, i;
8396
8397 err = iwx_run_init_mvm_ucode(sc, 0);
8398 if (err)
8399 return err;
8400
8401 if (!iwx_nic_lock(sc))
8402 return EBUSY;
8403
8404 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8405 if (err) {
8406 printf("%s: could not init tx ant config (error %d)\n",
8407 DEVNAME(sc), err);
8408 goto err;
8409 }
8410
8411 if (sc->sc_tx_with_siso_diversity) {
8412 err = iwx_send_phy_cfg_cmd(sc);
8413 if (err) {
8414 printf("%s: could not send phy config (error %d)\n",
8415 DEVNAME(sc), err);
8416 goto err;
8417 }
8418 }
8419
8420 err = iwx_send_bt_init_conf(sc);
8421 if (err) {
8422 printf("%s: could not init bt coex (error %d)\n",
8423 DEVNAME(sc), err);
8424 return err;
8425 }
8426
8427 err = iwx_send_soc_conf(sc);
8428 if (err) {
8429 printf("%s: iwx_send_soc_conf failed\n", __func__);
8430 return err;
8431 }
8432
8433 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8434 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8435 err = iwx_send_dqa_cmd(sc);
8436 if (err) {
8437 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8438 "failed (error %d)\n", __func__, err);
8439 return err;
8440 }
8441 }
8442 // TODO phyctxt
8443 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8444 /*
8445 * The channel used here isn't relevant as it's
8446 * going to be overwritten in the other flows.
8447 * For now use the first channel we have.
8448 */
8449 sc->sc_phyctxt[i].id = i;
8450 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8451 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8452 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8453 if (err) {
8454 printf("%s: could not add phy context %d (error %d)\n",
8455 DEVNAME(sc), i, err);
8456 goto err;
8457 }
8458 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8459 IWX_RLC_CONFIG_CMD) == 2) {
8460 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8461 if (err) {
8462 printf("%s: could not configure RLC for PHY "
8463 "%d (error %d)\n", DEVNAME(sc), i, err);
8464 goto err;
8465 }
8466 }
8467 }
8468
8469 err = iwx_config_ltr(sc);
8470 if (err) {
8471 printf("%s: PCIe LTR configuration failed (error %d)\n",
8472 DEVNAME(sc), err);
8473 }
8474
8475 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8476 err = iwx_send_temp_report_ths_cmd(sc);
8477 if (err) {
8478 printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8479 __func__);
8480 goto err;
8481 }
8482 }
8483
8484 err = iwx_power_update_device(sc);
8485 if (err) {
8486 printf("%s: could not send power command (error %d)\n",
8487 DEVNAME(sc), err);
8488 goto err;
8489 }
8490
8491 if (sc->sc_nvm.lar_enabled) {
8492 err = iwx_send_update_mcc_cmd(sc, "ZZ");
8493 if (err) {
8494 printf("%s: could not init LAR (error %d)\n",
8495 DEVNAME(sc), err);
8496 goto err;
8497 }
8498 }
8499
8500 err = iwx_config_umac_scan_reduced(sc);
8501 if (err) {
8502 printf("%s: could not configure scan (error %d)\n",
8503 DEVNAME(sc), err);
8504 goto err;
8505 }
8506
8507 err = iwx_disable_beacon_filter(sc);
8508 if (err) {
8509 printf("%s: could not disable beacon filter (error %d)\n",
8510 DEVNAME(sc), err);
8511 goto err;
8512 }
8513
8514 err:
8515 iwx_nic_unlock(sc);
8516 return err;
8517 }
8518
8519 /* Allow multicast from our BSSID. */
8520 static int
iwx_allow_mcast(struct iwx_softc * sc)8521 iwx_allow_mcast(struct iwx_softc *sc)
8522 {
8523 struct ieee80211com *ic = &sc->sc_ic;
8524 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8525 struct iwx_node *in = IWX_NODE(vap->iv_bss);
8526 struct iwx_mcast_filter_cmd *cmd;
8527 size_t size;
8528 int err;
8529
8530 size = roundup(sizeof(*cmd), 4);
8531 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8532 if (cmd == NULL)
8533 return ENOMEM;
8534 cmd->filter_own = 1;
8535 cmd->port_id = 0;
8536 cmd->count = 0;
8537 cmd->pass_all = 1;
8538 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8539
8540 err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8541 0, size, cmd);
8542 free(cmd, M_DEVBUF);
8543 return err;
8544 }
8545
/*
 * Bring the device up: start the hardware, load and configure the
 * firmware, and arm the watchdog.  Returns 0 on success or an errno.
 */
static int
iwx_init(struct iwx_softc *sc)
{
	int err, generation;

	/*
	 * Bump the generation count so teardown paths can detect that
	 * the device was re-initialized underneath them (see the
	 * generation checks below and in iwx_auth's unwind path).
	 */
	generation = ++sc->sc_generation;

	/* NOTE(review): iwx_preinit() return value is ignored here --
	 * confirm its failures are always recoverable. */
	iwx_preinit(sc);

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: iwx_start_hw failed\n", __func__);
		return err;
	}

	err = iwx_init_hw(sc);
	if (err) {
		/* Only stop the device if nobody re-initialized it since. */
		if (generation == sc->sc_generation)
			iwx_stop_device(sc);
		printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
		return err;
	}

	sc->sc_flags |= IWX_FLAG_HW_INITED;
	/* Start the per-second Tx watchdog. */
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);

	return 0;
}
8572
8573 static void
iwx_start(struct iwx_softc * sc)8574 iwx_start(struct iwx_softc *sc)
8575 {
8576 struct ieee80211_node *ni;
8577 struct mbuf *m;
8578
8579 IWX_ASSERT_LOCKED(sc);
8580
8581 while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8582 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8583 if (iwx_tx(sc, m, ni) != 0) {
8584 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8585 continue;
8586 }
8587 }
8588 }
8589
8590 static void
iwx_stop(struct iwx_softc * sc)8591 iwx_stop(struct iwx_softc *sc)
8592 {
8593 struct ieee80211com *ic = &sc->sc_ic;
8594 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8595 struct iwx_vap *ivp = IWX_VAP(vap);
8596
8597 iwx_stop_device(sc);
8598
8599 /* Reset soft state. */
8600 sc->sc_generation++;
8601 ivp->phy_ctxt = NULL;
8602
8603 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8604 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8605 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8606 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8607 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8608 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8609 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8610 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8611
8612 sc->sc_rx_ba_sessions = 0;
8613 sc->ba_rx.start_tidmask = 0;
8614 sc->ba_rx.stop_tidmask = 0;
8615 memset(sc->aggqid, 0, sizeof(sc->aggqid));
8616 sc->ba_tx.start_tidmask = 0;
8617 sc->ba_tx.stop_tidmask = 0;
8618 }
8619
/*
 * Per-second watchdog callout.  Decrements the per-queue Tx timers;
 * if any expires, dumps firmware/driver state and restarts the whole
 * 802.11 stack (in which case the callout is deliberately not
 * rescheduled -- re-init arms it again).
 */
static void
iwx_watchdog(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int i;

	/*
	 * We maintain a separate timer for each Tx queue because
	 * Tx aggregation queues can get "stuck" while other queues
	 * keep working. The Linux driver uses a similar workaround.
	 */
	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
		if (sc->sc_tx_timer[i] > 0) {
			if (--sc->sc_tx_timer[i] == 0) {
				printf("%s: device timeout\n", DEVNAME(sc));

				/* Dump firmware and driver state, then
				 * recover by restarting the stack. */
				iwx_nic_error(sc);
				iwx_dump_driver_status(sc);
				ieee80211_restart_all(ic);
				return;
			}
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
}
8646
8647 /*
8648 * Note: This structure is read from the device with IO accesses,
8649 * and the reading already does the endian conversion. As it is
8650 * read with uint32_t-sized accesses, any members with a different size
8651 * need to be ordered correctly though!
8652 */
/* Firmware error log layout (see also iwx_umac_error_event_table). */
struct iwx_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8700
8701 /*
8702 * UMAC error struct - relevant starting from family 8000 chip.
8703 * Note: This structure is read from the device with IO accesses,
8704 * and the reading already does the endian conversion. As it is
8705 * read with u32-sized accesses, any members with a different size
8706 * need to be ordered correctly though!
8707 */
struct iwx_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;
	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;

/* Used by iwx_nic_umac_error() to sanity-check an error table. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8728
/*
 * Dump the UMAC firmware error log after a firmware crash.  The table
 * lives in device memory at the address the firmware reported in
 * sc_uc.uc_umac_error_event_table.
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Addresses below 0x400000 are rejected as invalid table pointers. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() counts in 32-bit words, not bytes. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8773
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Known firmware SYSASSERT error codes and their symbolic names.
 * The final entry (num == 0, "ADVANCED_SYSASSERT") is the catch-all
 * that iwx_desc_lookup() returns when no other entry matches.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8800
8801 static const char *
iwx_desc_lookup(uint32_t num)8802 iwx_desc_lookup(uint32_t num)
8803 {
8804 int i;
8805
8806 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8807 if (advanced_lookup[i].num ==
8808 (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8809 return advanced_lookup[i].name;
8810
8811 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8812 return advanced_lookup[i].name;
8813 }
8814
8815 /*
8816 * Support for dumping the error log seemed like a good idea ...
8817 * but it's mostly hex junk and the only sensible thing is the
8818 * hw/ucode revision (which we know anyway). Since it's here,
8819 * I'll just leave it in, just in case e.g. the Intel guys want to
8820 * help us decipher some "ADVANCED_SYSASSERT" later.
8821 */
8822 static void
iwx_nic_error(struct iwx_softc * sc)8823 iwx_nic_error(struct iwx_softc *sc)
8824 {
8825 struct iwx_error_event_table table;
8826 uint32_t base;
8827
8828 printf("%s: dumping device error log\n", DEVNAME(sc));
8829 printf("%s: GOS-3758: 1\n", __func__);
8830 base = sc->sc_uc.uc_lmac_error_event_table[0];
8831 printf("%s: GOS-3758: 2\n", __func__);
8832 if (base < 0x400000) {
8833 printf("%s: Invalid error log pointer 0x%08x\n",
8834 DEVNAME(sc), base);
8835 return;
8836 }
8837
8838 printf("%s: GOS-3758: 3\n", __func__);
8839 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8840 printf("%s: reading errlog failed\n", DEVNAME(sc));
8841 return;
8842 }
8843
8844 printf("%s: GOS-3758: 4\n", __func__);
8845 if (!table.valid) {
8846 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8847 return;
8848 }
8849
8850 printf("%s: GOS-3758: 5\n", __func__);
8851 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8852 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8853 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8854 sc->sc_flags, table.valid);
8855 }
8856
8857 printf("%s: GOS-3758: 6\n", __func__);
8858 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8859 iwx_desc_lookup(table.error_id));
8860 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8861 table.trm_hw_status0);
8862 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8863 table.trm_hw_status1);
8864 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8865 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8866 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8867 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8868 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8869 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8870 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8871 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8872 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8873 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8874 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8875 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8876 table.fw_rev_type);
8877 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8878 table.major);
8879 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8880 table.minor);
8881 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8882 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8883 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8884 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8885 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8886 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8887 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8888 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8889 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8890 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8891 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8892 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8893 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8894 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8895 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8896 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8897 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8898
8899 if (sc->sc_uc.uc_umac_error_event_table)
8900 iwx_nic_umac_error(sc);
8901 }
8902
8903 static void
iwx_dump_driver_status(struct iwx_softc * sc)8904 iwx_dump_driver_status(struct iwx_softc *sc)
8905 {
8906 struct ieee80211com *ic = &sc->sc_ic;
8907 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8908 enum ieee80211_state state = vap->iv_state;
8909 int i;
8910
8911 printf("driver status:\n");
8912 for (i = 0; i < nitems(sc->txq); i++) {
8913 struct iwx_tx_ring *ring = &sc->txq[i];
8914 printf(" tx ring %2d: qid=%-2d cur=%-3d "
8915 "cur_hw=%-3d queued=%-3d\n",
8916 i, ring->qid, ring->cur, ring->cur_hw,
8917 ring->queued);
8918 }
8919 printf(" rx ring: cur=%d\n", sc->rxq.cur);
8920 printf(" 802.11 state %s\n", ieee80211_state_name[state]);
8921 }
8922
/*
 * Sync the current RX buffer for CPU reads and point _var_ at the
 * response payload that immediately follows the iwx_rx_packet header.
 * Relies on 'sc' and 'data' being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
8928
8929 static int
iwx_rx_pkt_valid(struct iwx_rx_packet * pkt)8930 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8931 {
8932 int qid, idx, code;
8933
8934 qid = pkt->hdr.qid & ~0x80;
8935 idx = pkt->hdr.idx;
8936 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8937
8938 return (!(qid == 0 && idx == 0 && code == 0) &&
8939 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8940 }
8941
8942 static void
iwx_rx_pkt(struct iwx_softc * sc,struct iwx_rx_data * data,struct mbuf * ml)8943 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
8944 {
8945 struct ieee80211com *ic = &sc->sc_ic;
8946 struct iwx_rx_packet *pkt, *nextpkt;
8947 uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8948 struct mbuf *m0, *m;
8949 const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8950 int qid, idx, code, handled = 1;
8951
8952 m0 = data->m;
8953 while (m0 && offset + minsz < IWX_RBUF_SIZE) {
8954 pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
8955 qid = pkt->hdr.qid;
8956 idx = pkt->hdr.idx;
8957 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8958
8959 if (!iwx_rx_pkt_valid(pkt))
8960 break;
8961
8962 /*
8963 * XXX Intel inside (tm)
8964 * Any commands in the LONG_GROUP could actually be in the
8965 * LEGACY group. Firmware API versions >= 50 reject commands
8966 * in group 0, forcing us to use this hack.
8967 */
8968 if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
8969 struct iwx_tx_ring *ring = &sc->txq[qid];
8970 struct iwx_tx_data *txdata = &ring->data[idx];
8971 if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
8972 code = iwx_cmd_opcode(code);
8973 }
8974
8975 len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
8976 if (len < minsz || len > (IWX_RBUF_SIZE - offset))
8977 break;
8978
8979 // TODO ???
8980 if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8981 /* Take mbuf m0 off the RX ring. */
8982 if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
8983 break;
8984 }
8985 KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
8986 }
8987
8988 switch (code) {
8989 case IWX_REPLY_RX_PHY_CMD:
8990 /* XXX-THJ: I've not managed to hit this path in testing */
8991 iwx_rx_rx_phy_cmd(sc, pkt, data);
8992 break;
8993
8994 case IWX_REPLY_RX_MPDU_CMD: {
8995 size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
8996 nextoff = offset +
8997 roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8998 nextpkt = (struct iwx_rx_packet *)
8999 (m0->m_data + nextoff);
9000 /* AX210 devices ship only one packet per Rx buffer. */
9001 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
9002 nextoff + minsz >= IWX_RBUF_SIZE ||
9003 !iwx_rx_pkt_valid(nextpkt)) {
9004 /* No need to copy last frame in buffer. */
9005 if (offset > 0)
9006 m_adj(m0, offset);
9007 iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
9008 m0 = NULL; /* stack owns m0 now; abort loop */
9009 } else {
9010 /*
9011 * Create an mbuf which points to the current
9012 * packet. Always copy from offset zero to
9013 * preserve m_pkthdr.
9014 */
9015 m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
9016 if (m == NULL) {
9017 m_freem(m0);
9018 m0 = NULL;
9019 break;
9020 }
9021 m_adj(m, offset);
9022 iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
9023 }
9024 break;
9025 }
9026
9027 // case IWX_BAR_FRAME_RELEASE:
9028 // iwx_rx_bar_frame_release(sc, pkt, ml);
9029 // break;
9030 //
9031 case IWX_TX_CMD:
9032 iwx_rx_tx_cmd(sc, pkt, data);
9033 break;
9034
9035 case IWX_BA_NOTIF:
9036 iwx_rx_compressed_ba(sc, pkt);
9037 break;
9038
9039 case IWX_MISSED_BEACONS_NOTIFICATION:
9040 IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
9041 "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
9042 __func__);
9043 iwx_rx_bmiss(sc, pkt, data);
9044 break;
9045
9046 case IWX_MFUART_LOAD_NOTIFICATION:
9047 break;
9048
9049 case IWX_ALIVE: {
9050 struct iwx_alive_resp_v4 *resp4;
9051 struct iwx_alive_resp_v5 *resp5;
9052 struct iwx_alive_resp_v6 *resp6;
9053
9054 DPRINTF(("%s: firmware alive\n", __func__));
9055 sc->sc_uc.uc_ok = 0;
9056
9057 /*
9058 * For v5 and above, we can check the version, for older
9059 * versions we need to check the size.
9060 */
9061 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9062 IWX_ALIVE) == 6) {
9063 SYNC_RESP_STRUCT(resp6, pkt);
9064 if (iwx_rx_packet_payload_len(pkt) !=
9065 sizeof(*resp6)) {
9066 sc->sc_uc.uc_intr = 1;
9067 wakeup(&sc->sc_uc);
9068 break;
9069 }
9070 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9071 resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9072 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9073 resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9074 sc->sc_uc.uc_log_event_table = le32toh(
9075 resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9076 sc->sc_uc.uc_umac_error_event_table = le32toh(
9077 resp6->umac_data.dbg_ptrs.error_info_addr);
9078 sc->sc_sku_id[0] =
9079 le32toh(resp6->sku_id.data[0]);
9080 sc->sc_sku_id[1] =
9081 le32toh(resp6->sku_id.data[1]);
9082 sc->sc_sku_id[2] =
9083 le32toh(resp6->sku_id.data[2]);
9084 if (resp6->status == IWX_ALIVE_STATUS_OK) {
9085 sc->sc_uc.uc_ok = 1;
9086 }
9087 } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9088 IWX_ALIVE) == 5) {
9089 SYNC_RESP_STRUCT(resp5, pkt);
9090 if (iwx_rx_packet_payload_len(pkt) !=
9091 sizeof(*resp5)) {
9092 sc->sc_uc.uc_intr = 1;
9093 wakeup(&sc->sc_uc);
9094 break;
9095 }
9096 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9097 resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9098 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9099 resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9100 sc->sc_uc.uc_log_event_table = le32toh(
9101 resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9102 sc->sc_uc.uc_umac_error_event_table = le32toh(
9103 resp5->umac_data.dbg_ptrs.error_info_addr);
9104 sc->sc_sku_id[0] =
9105 le32toh(resp5->sku_id.data[0]);
9106 sc->sc_sku_id[1] =
9107 le32toh(resp5->sku_id.data[1]);
9108 sc->sc_sku_id[2] =
9109 le32toh(resp5->sku_id.data[2]);
9110 if (resp5->status == IWX_ALIVE_STATUS_OK)
9111 sc->sc_uc.uc_ok = 1;
9112 } else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9113 SYNC_RESP_STRUCT(resp4, pkt);
9114 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9115 resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9116 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9117 resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9118 sc->sc_uc.uc_log_event_table = le32toh(
9119 resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9120 sc->sc_uc.uc_umac_error_event_table = le32toh(
9121 resp4->umac_data.dbg_ptrs.error_info_addr);
9122 if (resp4->status == IWX_ALIVE_STATUS_OK)
9123 sc->sc_uc.uc_ok = 1;
9124 } else
9125 printf("unknown payload version");
9126
9127 sc->sc_uc.uc_intr = 1;
9128 wakeup(&sc->sc_uc);
9129 break;
9130 }
9131
9132 case IWX_STATISTICS_NOTIFICATION: {
9133 struct iwx_notif_statistics *stats;
9134 SYNC_RESP_STRUCT(stats, pkt);
9135 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9136 sc->sc_noise = iwx_get_noise(&stats->rx.general);
9137 break;
9138 }
9139
9140 case IWX_DTS_MEASUREMENT_NOTIFICATION:
9141 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9142 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9143 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9144 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9145 break;
9146
9147 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9148 IWX_CT_KILL_NOTIFICATION): {
9149 struct iwx_ct_kill_notif *notif;
9150 SYNC_RESP_STRUCT(notif, pkt);
9151 printf("%s: device at critical temperature (%u degC), "
9152 "stopping device\n",
9153 DEVNAME(sc), le16toh(notif->temperature));
9154 sc->sc_flags |= IWX_FLAG_HW_ERR;
9155 ieee80211_restart_all(ic);
9156 break;
9157 }
9158
9159 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9160 IWX_SCD_QUEUE_CONFIG_CMD):
9161 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9162 IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
9163 case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9164 IWX_SESSION_PROTECTION_CMD):
9165 case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9166 IWX_NVM_GET_INFO):
9167 case IWX_ADD_STA_KEY:
9168 case IWX_PHY_CONFIGURATION_CMD:
9169 case IWX_TX_ANT_CONFIGURATION_CMD:
9170 case IWX_ADD_STA:
9171 case IWX_MAC_CONTEXT_CMD:
9172 case IWX_REPLY_SF_CFG_CMD:
9173 case IWX_POWER_TABLE_CMD:
9174 case IWX_LTR_CONFIG:
9175 case IWX_PHY_CONTEXT_CMD:
9176 case IWX_BINDING_CONTEXT_CMD:
9177 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
9178 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
9179 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
9180 case IWX_REPLY_BEACON_FILTERING_CMD:
9181 case IWX_MAC_PM_POWER_TABLE:
9182 case IWX_TIME_QUOTA_CMD:
9183 case IWX_REMOVE_STA:
9184 case IWX_TXPATH_FLUSH:
9185 case IWX_BT_CONFIG:
9186 case IWX_MCC_UPDATE_CMD:
9187 case IWX_TIME_EVENT_CMD:
9188 case IWX_STATISTICS_CMD:
9189 case IWX_SCD_QUEUE_CFG: {
9190 size_t pkt_len;
9191
9192 if (sc->sc_cmd_resp_pkt[idx] == NULL)
9193 break;
9194
9195 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
9196 BUS_DMASYNC_POSTREAD);
9197
9198 pkt_len = sizeof(pkt->len_n_flags) +
9199 iwx_rx_packet_len(pkt);
9200
9201 if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
9202 pkt_len < sizeof(*pkt) ||
9203 pkt_len > sc->sc_cmd_resp_len[idx]) {
9204 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
9205 sc->sc_cmd_resp_pkt[idx] = NULL;
9206 break;
9207 }
9208
9209 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
9210 BUS_DMASYNC_POSTREAD);
9211 memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
9212 break;
9213 }
9214
9215 case IWX_INIT_COMPLETE_NOTIF:
9216 sc->sc_init_complete |= IWX_INIT_COMPLETE;
9217 wakeup(&sc->sc_init_complete);
9218 break;
9219
9220 case IWX_SCAN_COMPLETE_UMAC: {
9221 DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
9222 struct iwx_umac_scan_complete *notif __attribute__((unused));
9223 SYNC_RESP_STRUCT(notif, pkt);
9224 DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
9225 notif->status));
9226 ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
9227 iwx_endscan(sc);
9228 break;
9229 }
9230
9231 case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
9232 DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
9233 __func__));
9234 struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
9235 SYNC_RESP_STRUCT(notif, pkt);
9236 DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
9237 notif->status));
9238 iwx_endscan(sc);
9239 break;
9240 }
9241
9242 case IWX_MCC_CHUB_UPDATE_CMD: {
9243 struct iwx_mcc_chub_notif *notif;
9244 SYNC_RESP_STRUCT(notif, pkt);
9245 iwx_mcc_update(sc, notif);
9246 break;
9247 }
9248
9249 case IWX_REPLY_ERROR: {
9250 struct iwx_error_resp *resp;
9251 SYNC_RESP_STRUCT(resp, pkt);
9252 printf("%s: firmware error 0x%x, cmd 0x%x\n",
9253 DEVNAME(sc), le32toh(resp->error_type),
9254 resp->cmd_id);
9255 break;
9256 }
9257
9258 case IWX_TIME_EVENT_NOTIFICATION: {
9259 struct iwx_time_event_notif *notif;
9260 uint32_t action;
9261 SYNC_RESP_STRUCT(notif, pkt);
9262
9263 if (sc->sc_time_event_uid != le32toh(notif->unique_id))
9264 break;
9265 action = le32toh(notif->action);
9266 if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
9267 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9268 break;
9269 }
9270
9271 case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9272 IWX_SESSION_PROTECTION_NOTIF): {
9273 struct iwx_session_prot_notif *notif;
9274 uint32_t status, start, conf_id;
9275
9276 SYNC_RESP_STRUCT(notif, pkt);
9277
9278 status = le32toh(notif->status);
9279 start = le32toh(notif->start);
9280 conf_id = le32toh(notif->conf_id);
9281 /* Check for end of successful PROTECT_CONF_ASSOC. */
9282 if (status == 1 && start == 0 &&
9283 conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
9284 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9285 break;
9286 }
9287
9288 case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
9289 IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
9290 break;
9291
9292 /*
9293 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
9294 * messages. Just ignore them for now.
9295 */
9296 case IWX_DEBUG_LOG_MSG:
9297 break;
9298
9299 case IWX_MCAST_FILTER_CMD:
9300 break;
9301
9302 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
9303 break;
9304
9305 case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
9306 break;
9307
9308 case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
9309 break;
9310
9311 case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9312 IWX_NVM_ACCESS_COMPLETE):
9313 break;
9314
9315 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
9316 break; /* happens in monitor mode; ignore for now */
9317
9318 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
9319 break;
9320
9321 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9322 IWX_TLC_MNG_UPDATE_NOTIF): {
9323 struct iwx_tlc_update_notif *notif;
9324 SYNC_RESP_STRUCT(notif, pkt);
9325 (void)notif;
9326 if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
9327 iwx_rs_update(sc, notif);
9328 break;
9329 }
9330
9331 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
9332 break;
9333
9334 /* undocumented notification from iwx-ty-a0-gf-a0-77 image */
9335 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
9336 break;
9337
9338 case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9339 IWX_PNVM_INIT_COMPLETE):
9340 DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
9341 sc->sc_init_complete |= IWX_PNVM_COMPLETE;
9342 wakeup(&sc->sc_init_complete);
9343 break;
9344
9345 default:
9346 handled = 0;
9347 /* XXX wulf: Get rid of bluetooth-related spam */
9348 if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
9349 (code == 0xce && pkt->len_n_flags == 0x2000002c))
9350 break;
9351 printf("%s: unhandled firmware response 0x%x/0x%x "
9352 "rx ring %d[%d]\n",
9353 DEVNAME(sc), code, pkt->len_n_flags,
9354 (qid & ~0x80), idx);
9355 break;
9356 }
9357
9358 /*
9359 * uCode sets bit 0x80 when it originates the notification,
9360 * i.e. when the notification is not a direct response to a
9361 * command sent by the driver.
9362 * For example, uCode issues IWX_REPLY_RX when it sends a
9363 * received frame to the driver.
9364 */
9365 if (handled && !(qid & (1 << 7))) {
9366 iwx_cmd_done(sc, qid, idx, code);
9367 }
9368
9369 offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9370
9371 /* AX210 devices ship only one packet per Rx buffer. */
9372 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
9373 break;
9374 }
9375
9376 if (m0 && m0 != data->m)
9377 m_freem(m0);
9378 }
9379
/*
 * Drain all newly-closed RX buffers.  The hardware advances a
 * "closed receive buffer" index in the RX status DMA area; process
 * every ring entry between our software cursor and that index, then
 * tell the firmware how far we got.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf m;	/* placeholder; iwx_rx_pkt() never reads it */
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* AX210+ uses a bare 16-bit status word; older chips use
	 * the closed_rb_num field of the RX status struct. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9412
/*
 * Legacy INTx/ICT interrupt handler, currently compiled out; the
 * driver uses MSI-X via iwx_intr_msix() instead.  Kept for reference.
 */
#if 0
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
#endif
9541
/*
 * MSI-X interrupt handler (vector 0).  Reads and acknowledges the FH
 * (flow handler / DMA) and HW interrupt cause registers, then services
 * RX notifications, firmware-chunk completion, fatal errors, rfkill
 * and the firmware ALIVE cause, in that order.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	/* Ack the causes we are about to service. */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	/* Only consider causes we actually enabled. */
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
 out:
	IWX_UNLOCK(sc);
	return;
}
9623
9624 /*
9625 * The device info table below contains device-specific config overrides.
9626 * The most important parameter derived from this table is the name of the
9627 * firmware image to load.
9628 *
9629 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9630 * The "old" table matches devices based on PCI vendor/product IDs only.
9631 * The "new" table extends this with various device parameters derived
9632 * from MAC type, and RF type.
9633 *
9634 * In iwlwifi "old" and "new" tables share the same array, where "old"
9635 * entries contain dummy values for data defined only for "new" entries.
9636 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9639 * entries from "old" to "new" have at times been reverted due to regressions.
9640 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9641 * devices in the same driver.
9642 *
9643 * Our table below contains mostly "new" entries declared in iwlwifi
9644 * with the _IWL_DEV_INFO() macro (with a leading underscore).
9645 * Other devices are matched based on PCI vendor/product ID as usual,
9646 * unless matching specific PCI subsystem vendor/product IDs is required.
9647 *
9648 * Some "old"-style entries are required to identify the firmware image to use.
9649 * Others might be used to print a specific marketing name into Linux dmesg,
9650 * but we can't be sure whether the corresponding devices would be matched
9651 * correctly in the absence of their entries. So we include them just in case.
9652 */
9653
/*
 * One entry of the device-info match table, modelled on iwlwifi's
 * _IWL_DEV_INFO() entries.  Fields set to IWX_CFG_ANY act as wildcards
 * when matching a device against the table.
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID */
	uint16_t subdevice;	/* PCI subsystem device ID */
	uint16_t mac_type;
	uint16_t rf_type;
	uint8_t mac_step;
	uint8_t rf_id;
	uint8_t no_160;		/* presumably: no 160 MHz support — TODO confirm */
	uint8_t cores;
	uint8_t cdb;
	uint8_t jacket;
	const struct iwx_device_cfg *cfg;	/* config/firmware selection */
};
9667
/* Build a fully-specified match entry (mirrors iwlwifi's _IWL_DEV_INFO). */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/* Match on PCI vendor/product IDs only; all other fields are wildcards. */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	\
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9679
9680 /*
9681 * When adding entries to this table keep in mind that entries must
9682 * be listed in the same order as in the Linux driver. Code walks this
9683 * table backwards and uses the first matching entry it finds.
9684 * Device firmware must be available in fw_update(8).
9685 */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/*
	 * NOTE: entry order matters. iwx_find_device_cfg() walks this table
	 * backwards and returns the first (i.e. last-listed) matching entry,
	 * so more specific entries must come later, exactly as in iwlwifi.
	 */
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};
9984
9985 static int
iwx_preinit(struct iwx_softc * sc)9986 iwx_preinit(struct iwx_softc *sc)
9987 {
9988 struct ieee80211com *ic = &sc->sc_ic;
9989 int err;
9990
9991 err = iwx_prepare_card_hw(sc);
9992 if (err) {
9993 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9994 return err;
9995 }
9996
9997 if (sc->attached) {
9998 return 0;
9999 }
10000
10001 err = iwx_start_hw(sc);
10002 if (err) {
10003 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10004 return err;
10005 }
10006
10007 err = iwx_run_init_mvm_ucode(sc, 1);
10008 iwx_stop_device(sc);
10009 if (err) {
10010 printf("%s: failed to stop device\n", DEVNAME(sc));
10011 return err;
10012 }
10013
10014 /* Print version info and MAC address on first successful fw load. */
10015 sc->attached = 1;
10016 if (sc->sc_pnvm_ver) {
10017 printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10018 "address %s\n",
10019 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10020 sc->sc_fwver, sc->sc_pnvm_ver,
10021 ether_sprintf(sc->sc_nvm.hw_addr));
10022 } else {
10023 printf("%s: hw rev 0x%x, fw %s, address %s\n",
10024 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10025 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10026 }
10027
10028 /* not all hardware can do 5GHz band */
10029 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10030 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10031 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10032
10033 return 0;
10034 }
10035
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/*
	 * Deferred attach (config_intrhook): runs once interrupts are
	 * available, since iwx_preinit() needs to load firmware. On any
	 * failure the net80211 attach is skipped entirely.
	 */
	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/* Attach to net80211, then install our driver methods. */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/*
	 * Save net80211's default A-MPDU RX handlers before overriding
	 * them, so our wrappers can chain to the originals.
	 */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	/* Likewise for the ADDBA (TX aggregation) handlers. */
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	/* Always release the intrhook, even if preinit failed. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10084
10085 const struct iwx_device_cfg *
iwx_find_device_cfg(struct iwx_softc * sc)10086 iwx_find_device_cfg(struct iwx_softc *sc)
10087 {
10088 uint16_t sdev_id, mac_type, rf_type;
10089 uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10090 int i;
10091
10092 sdev_id = pci_get_subdevice(sc->sc_dev);
10093 mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10094 mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10095 rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10096 cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10097 jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10098
10099 rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10100 no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10101 cores = IWX_SUBDEVICE_CORES(sdev_id);
10102
10103 for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10104 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10105
10106 if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10107 dev_info->device != sc->sc_pid)
10108 continue;
10109
10110 if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10111 dev_info->subdevice != sdev_id)
10112 continue;
10113
10114 if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10115 dev_info->mac_type != mac_type)
10116 continue;
10117
10118 if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10119 dev_info->mac_step != mac_step)
10120 continue;
10121
10122 if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10123 dev_info->rf_type != rf_type)
10124 continue;
10125
10126 if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10127 dev_info->cdb != cdb)
10128 continue;
10129
10130 if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10131 dev_info->jacket != jacket)
10132 continue;
10133
10134 if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10135 dev_info->rf_id != rf_id)
10136 continue;
10137
10138 if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10139 dev_info->no_160 != no_160)
10140 continue;
10141
10142 if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10143 dev_info->cores != cores)
10144 continue;
10145
10146 return dev_info->cfg;
10147 }
10148
10149 return NULL;
10150 }
10151
10152 static int
iwx_probe(device_t dev)10153 iwx_probe(device_t dev)
10154 {
10155 int i;
10156
10157 for (i = 0; i < nitems(iwx_devices); i++) {
10158 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10159 pci_get_device(dev) == iwx_devices[i].device) {
10160 device_set_desc(dev, iwx_devices[i].name);
10161
10162 /*
10163 * Due to significant existing deployments using
10164 * iwlwifi lower the priority of iwx.
10165 *
10166 * This inverts the advice in bus.h where drivers
10167 * supporting newer hardware should return
10168 * BUS_PROBE_DEFAULT and drivers for older devices
10169 * return BUS_PROBE_LOW_PRIORITY.
10170 *
10171 */
10172 return (BUS_PROBE_LOW_PRIORITY);
10173 }
10174 }
10175
10176 return (ENXIO);
10177 }
10178
10179 static int
iwx_attach(device_t dev)10180 iwx_attach(device_t dev)
10181 {
10182 struct iwx_softc *sc = device_get_softc(dev);
10183 struct ieee80211com *ic = &sc->sc_ic;
10184 const struct iwx_device_cfg *cfg;
10185 int err;
10186 int txq_i, i, j;
10187 size_t ctxt_info_size;
10188 int rid;
10189 int count;
10190 int error;
10191 sc->sc_dev = dev;
10192 sc->sc_pid = pci_get_device(dev);
10193 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
10194
10195 TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
10196 IWX_LOCK_INIT(sc);
10197 mbufq_init(&sc->sc_snd, ifqmaxlen);
10198 TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
10199 TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
10200 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
10201 taskqueue_thread_enqueue, &sc->sc_tq);
10202 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
10203 if (error != 0) {
10204 device_printf(dev, "can't start taskq thread, error %d\n",
10205 error);
10206 return (ENXIO);
10207 }
10208
10209 pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
10210 if (sc->sc_cap_off == 0) {
10211 device_printf(dev, "PCIe capability structure not found!\n");
10212 return (ENXIO);
10213 }
10214
10215 /*
10216 * We disable the RETRY_TIMEOUT register (0x41) to keep
10217 * PCI Tx retries from interfering with C3 CPU state.
10218 */
10219 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10220
10221 if (pci_msix_count(dev)) {
10222 sc->sc_msix = 1;
10223 } else {
10224 device_printf(dev, "no MSI-X found\n");
10225 return (ENXIO);
10226 }
10227
10228 pci_enable_busmaster(dev);
10229 rid = PCIR_BAR(0);
10230 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
10231 RF_ACTIVE);
10232 if (sc->sc_mem == NULL) {
10233 device_printf(sc->sc_dev, "can't map mem space\n");
10234 return (ENXIO);
10235 }
10236 sc->sc_st = rman_get_bustag(sc->sc_mem);
10237 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
10238
10239 count = 1;
10240 rid = 0;
10241 if (pci_alloc_msix(dev, &count) == 0)
10242 rid = 1;
10243 DPRINTF(("%s: count=%d\n", __func__, count));
10244 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
10245 (rid != 0 ? 0 : RF_SHAREABLE));
10246 if (sc->sc_irq == NULL) {
10247 device_printf(dev, "can't map interrupt\n");
10248 return (ENXIO);
10249 }
10250 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
10251 NULL, iwx_intr_msix, sc, &sc->sc_ih);
10252 if (error != 0) {
10253 device_printf(dev, "can't establish interrupt\n");
10254 return (ENXIO);
10255 }
10256
10257 /* Clear pending interrupts. */
10258 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10259 IWX_WRITE(sc, IWX_CSR_INT, ~0);
10260 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
10261
10262 sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
10263 DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
10264 sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
10265 DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));
10266
10267 /*
10268 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
10269 * changed, and now the revision step also includes bit 0-1 (no more
10270 * "dash" value). To keep hw_rev backwards compatible - we'll store it
10271 * in the old format.
10272 */
10273 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10274 (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10275
10276 switch (sc->sc_pid) {
10277 case PCI_PRODUCT_INTEL_WL_22500_1:
10278 sc->sc_fwname = IWX_CC_A_FW;
10279 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10280 sc->sc_integrated = 0;
10281 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10282 sc->sc_low_latency_xtal = 0;
10283 sc->sc_xtal_latency = 0;
10284 sc->sc_tx_with_siso_diversity = 0;
10285 sc->sc_uhb_supported = 0;
10286 break;
10287 case PCI_PRODUCT_INTEL_WL_22500_2:
10288 case PCI_PRODUCT_INTEL_WL_22500_5:
10289 /* These devices should be QuZ only. */
10290 if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
10291 device_printf(dev, "unsupported AX201 adapter\n");
10292 return (ENXIO);
10293 }
10294 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10295 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10296 sc->sc_integrated = 1;
10297 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10298 sc->sc_low_latency_xtal = 0;
10299 sc->sc_xtal_latency = 500;
10300 sc->sc_tx_with_siso_diversity = 0;
10301 sc->sc_uhb_supported = 0;
10302 break;
10303 case PCI_PRODUCT_INTEL_WL_22500_3:
10304 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10305 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10306 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10307 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10308 else
10309 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10310 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10311 sc->sc_integrated = 1;
10312 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10313 sc->sc_low_latency_xtal = 0;
10314 sc->sc_xtal_latency = 500;
10315 sc->sc_tx_with_siso_diversity = 0;
10316 sc->sc_uhb_supported = 0;
10317 break;
10318 case PCI_PRODUCT_INTEL_WL_22500_4:
10319 case PCI_PRODUCT_INTEL_WL_22500_7:
10320 case PCI_PRODUCT_INTEL_WL_22500_8:
10321 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10322 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10323 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10324 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10325 else
10326 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10327 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10328 sc->sc_integrated = 1;
10329 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
10330 sc->sc_low_latency_xtal = 0;
10331 sc->sc_xtal_latency = 1820;
10332 sc->sc_tx_with_siso_diversity = 0;
10333 sc->sc_uhb_supported = 0;
10334 break;
10335 case PCI_PRODUCT_INTEL_WL_22500_6:
10336 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10337 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10338 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10339 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10340 else
10341 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10342 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10343 sc->sc_integrated = 1;
10344 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10345 sc->sc_low_latency_xtal = 1;
10346 sc->sc_xtal_latency = 12000;
10347 sc->sc_tx_with_siso_diversity = 0;
10348 sc->sc_uhb_supported = 0;
10349 break;
10350 case PCI_PRODUCT_INTEL_WL_22500_9:
10351 case PCI_PRODUCT_INTEL_WL_22500_10:
10352 case PCI_PRODUCT_INTEL_WL_22500_11:
10353 case PCI_PRODUCT_INTEL_WL_22500_13:
10354 /* _14 is an MA device, not yet supported */
10355 case PCI_PRODUCT_INTEL_WL_22500_15:
10356 case PCI_PRODUCT_INTEL_WL_22500_16:
10357 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10358 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10359 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10360 sc->sc_integrated = 0;
10361 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10362 sc->sc_low_latency_xtal = 0;
10363 sc->sc_xtal_latency = 0;
10364 sc->sc_tx_with_siso_diversity = 0;
10365 sc->sc_uhb_supported = 1;
10366 break;
10367 case PCI_PRODUCT_INTEL_WL_22500_12:
10368 case PCI_PRODUCT_INTEL_WL_22500_17:
10369 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10370 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10371 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10372 sc->sc_integrated = 1;
10373 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10374 sc->sc_low_latency_xtal = 1;
10375 sc->sc_xtal_latency = 12000;
10376 sc->sc_tx_with_siso_diversity = 0;
10377 sc->sc_uhb_supported = 0;
10378 sc->sc_imr_enabled = 1;
10379 break;
10380 default:
10381 device_printf(dev, "unknown adapter type\n");
10382 return (ENXIO);
10383 }
10384
10385 cfg = iwx_find_device_cfg(sc);
10386 DPRINTF(("%s: cfg=%p\n", __func__, cfg));
10387 if (cfg) {
10388 sc->sc_fwname = cfg->fw_name;
10389 sc->sc_pnvm_name = cfg->pnvm_name;
10390 sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
10391 sc->sc_uhb_supported = cfg->uhb_supported;
10392 if (cfg->xtal_latency) {
10393 sc->sc_xtal_latency = cfg->xtal_latency;
10394 sc->sc_low_latency_xtal = cfg->low_latency_xtal;
10395 }
10396 }
10397
10398 sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
10399
10400 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10401 sc->sc_umac_prph_offset = 0x300000;
10402 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
10403 } else
10404 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
10405
10406 /* Allocate DMA memory for loading firmware. */
10407 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10408 ctxt_info_size = sizeof(struct iwx_context_info_gen3);
10409 else
10410 ctxt_info_size = sizeof(struct iwx_context_info);
10411 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
10412 ctxt_info_size, 1);
10413 if (err) {
10414 device_printf(dev,
10415 "could not allocate memory for loading firmware\n");
10416 return (ENXIO);
10417 }
10418
10419 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10420 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
10421 sizeof(struct iwx_prph_scratch), 1);
10422 if (err) {
10423 device_printf(dev,
10424 "could not allocate prph scratch memory\n");
10425 goto fail1;
10426 }
10427
10428 /*
10429 * Allocate prph information. The driver doesn't use this.
10430 * We use the second half of this page to give the device
10431 * some dummy TR/CR tail pointers - which shouldn't be
10432 * necessary as we don't use this, but the hardware still
10433 * reads/writes there and we can't let it go do that with
10434 * a NULL pointer.
10435 */
10436 KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
10437 ("iwx_prph_info has wrong size"));
10438 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
10439 PAGE_SIZE, 1);
10440 if (err) {
10441 device_printf(dev,
10442 "could not allocate prph info memory\n");
10443 goto fail1;
10444 }
10445 }
10446
10447 /* Allocate interrupt cause table (ICT).*/
10448 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10449 IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
10450 if (err) {
10451 device_printf(dev, "could not allocate ICT table\n");
10452 goto fail1;
10453 }
10454
10455 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10456 err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10457 if (err) {
10458 device_printf(dev, "could not allocate TX ring %d\n",
10459 txq_i);
10460 goto fail4;
10461 }
10462 }
10463
10464 err = iwx_alloc_rx_ring(sc, &sc->rxq);
10465 if (err) {
10466 device_printf(sc->sc_dev, "could not allocate RX ring\n");
10467 goto fail4;
10468 }
10469
10470 #ifdef IWX_DEBUG
10471 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10472 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
10473 CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
10474
10475 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10476 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
10477 CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
10478 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10479 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
10480 CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
10481
10482 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10483 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
10484 CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
10485
10486 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10487 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
10488 CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
10489 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10490 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
10491 CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
10492 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10493 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
10494 CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
10495 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10496 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
10497 CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
10498 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10499 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
10500 CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
10501 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10502 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
10503 CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
10504 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10505 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
10506 CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
10507 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10508 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
10509 CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
10510 #endif
10511 ic->ic_softc = sc;
10512 ic->ic_name = device_get_nameunit(sc->sc_dev);
10513 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
10514 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
10515
10516 /* Set device capabilities. */
10517 ic->ic_caps =
10518 IEEE80211_C_STA |
10519 IEEE80211_C_MONITOR |
10520 IEEE80211_C_WPA | /* WPA/RSN */
10521 IEEE80211_C_WME |
10522 IEEE80211_C_PMGT |
10523 IEEE80211_C_SHSLOT | /* short slot time supported */
10524 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
10525 IEEE80211_C_BGSCAN /* capable of bg scanning */
10526 ;
10527 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
10528 /* Enable seqno offload */
10529 ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
10530 /* Don't send null data frames; let firmware do it */
10531 ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
10532
10533 ic->ic_txstream = 2;
10534 ic->ic_rxstream = 2;
10535 ic->ic_htcaps |= IEEE80211_HTC_HT
10536 | IEEE80211_HTCAP_SMPS_OFF
10537 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
10538 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
10539 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
10540 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
10541 // | IEEE80211_HTC_RX_AMSDU_AMPDU /* TODO: hw reorder */
10542 | IEEE80211_HTCAP_MAXAMSDU_3839; /* max A-MSDU length */
10543
10544 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
10545
10546 /*
10547 * XXX: setupcurchan() expects vhtcaps to be non-zero
10548 * https://bugs.freebsd.org/274156
10549 */
10550 ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
10551 | IEEE80211_VHTCAP_SHORT_GI_80
10552 | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
10553 | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
10554 | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
10555
10556 ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
10557 int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
10558 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
10559 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
10560 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
10561 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
10562 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
10563 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
10564 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
10565 ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
10566 ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
10567
10568 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
10569 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10570 struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10571 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10572 rxba->sc = sc;
10573 for (j = 0; j < nitems(rxba->entries); j++)
10574 mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
10575 }
10576
10577 sc->sc_preinit_hook.ich_func = iwx_attach_hook;
10578 sc->sc_preinit_hook.ich_arg = sc;
10579 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
10580 device_printf(dev,
10581 "config_intrhook_establish failed\n");
10582 goto fail4;
10583 }
10584
10585 return (0);
10586
10587 fail4:
10588 while (--txq_i >= 0)
10589 iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10590 iwx_free_rx_ring(sc, &sc->rxq);
10591 if (sc->ict_dma.vaddr != NULL)
10592 iwx_dma_contig_free(&sc->ict_dma);
10593
10594 fail1:
10595 iwx_dma_contig_free(&sc->ctxt_info_dma);
10596 iwx_dma_contig_free(&sc->prph_scratch_dma);
10597 iwx_dma_contig_free(&sc->prph_info_dma);
10598 return (ENXIO);
10599 }
10600
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	/*
	 * device_detach(9) method.  Teardown order matters here:
	 * quiesce hardware, drain deferred work, detach from net80211,
	 * then release rings, firmware references and bus resources.
	 */
	iwx_stop_device(sc);

	/* Let queued tasks finish before the taskqueue disappears. */
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	/* Free all tx rings and the single rx ring. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Drop firmware image references taken at attach/init time. */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	/* Tear down the interrupt handler and PCI resources. */
	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10644
10645 static void
iwx_radiotap_attach(struct iwx_softc * sc)10646 iwx_radiotap_attach(struct iwx_softc *sc)
10647 {
10648 struct ieee80211com *ic = &sc->sc_ic;
10649
10650 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10651 "->%s begin\n", __func__);
10652
10653 ieee80211_radiotap_attach(ic,
10654 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10655 IWX_TX_RADIOTAP_PRESENT,
10656 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10657 IWX_RX_RADIOTAP_PRESENT);
10658
10659 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10660 "->%s end\n", __func__);
10661 }
10662
10663 struct ieee80211vap *
iwx_vap_create(struct ieee80211com * ic,const char name[IFNAMSIZ],int unit,enum ieee80211_opmode opmode,int flags,const uint8_t bssid[IEEE80211_ADDR_LEN],const uint8_t mac[IEEE80211_ADDR_LEN])10664 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10665 enum ieee80211_opmode opmode, int flags,
10666 const uint8_t bssid[IEEE80211_ADDR_LEN],
10667 const uint8_t mac[IEEE80211_ADDR_LEN])
10668 {
10669 struct iwx_vap *ivp;
10670 struct ieee80211vap *vap;
10671
10672 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
10673 return NULL;
10674 ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10675 vap = &ivp->iv_vap;
10676 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10677 vap->iv_bmissthreshold = 10; /* override default */
10678 /* Override with driver methods. */
10679 ivp->iv_newstate = vap->iv_newstate;
10680 vap->iv_newstate = iwx_newstate;
10681
10682 ivp->id = IWX_DEFAULT_MACID;
10683 ivp->color = IWX_DEFAULT_COLOR;
10684
10685 ivp->have_wme = TRUE;
10686 ivp->ps_disabled = FALSE;
10687
10688 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10689 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10690
10691 /* h/w crypto support */
10692 vap->iv_key_alloc = iwx_key_alloc;
10693 vap->iv_key_delete = iwx_key_delete;
10694 vap->iv_key_set = iwx_key_set;
10695 vap->iv_key_update_begin = iwx_key_update_begin;
10696 vap->iv_key_update_end = iwx_key_update_end;
10697
10698 ieee80211_ratectl_init(vap);
10699 /* Complete setup. */
10700 ieee80211_vap_attach(vap, ieee80211_media_change,
10701 ieee80211_media_status, mac);
10702 ic->ic_opmode = opmode;
10703
10704 return vap;
10705 }
10706
/*
 * net80211 ic_vap_delete method: undo iwx_vap_create().  The iwx_vap
 * pointer is captured before ieee80211_vap_detach() and freed last.
 */
static void
iwx_vap_delete(struct ieee80211vap *vap)
{
	struct iwx_vap *ivp = IWX_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}
10716
10717 static void
iwx_parent(struct ieee80211com * ic)10718 iwx_parent(struct ieee80211com *ic)
10719 {
10720 struct iwx_softc *sc = ic->ic_softc;
10721 IWX_LOCK(sc);
10722
10723 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10724 iwx_stop(sc);
10725 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10726 } else {
10727 iwx_init(sc);
10728 ieee80211_start_all(ic);
10729 }
10730 IWX_UNLOCK(sc);
10731 }
10732
10733 static int
iwx_suspend(device_t dev)10734 iwx_suspend(device_t dev)
10735 {
10736 struct iwx_softc *sc = device_get_softc(dev);
10737 struct ieee80211com *ic = &sc->sc_ic;
10738
10739 /*
10740 * Suspend everything first, then shutdown hardware if it's
10741 * still up.
10742 */
10743 ieee80211_suspend_all(ic);
10744
10745 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10746 iwx_stop(sc);
10747 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10748 }
10749 return (0);
10750 }
10751
10752 static int
iwx_resume(device_t dev)10753 iwx_resume(device_t dev)
10754 {
10755 struct iwx_softc *sc = device_get_softc(dev);
10756 struct ieee80211com *ic = &sc->sc_ic;
10757
10758 /*
10759 * We disable the RETRY_TIMEOUT register (0x41) to keep
10760 * PCI Tx retries from interfering with C3 CPU state.
10761 */
10762 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10763
10764 IWX_LOCK(sc);
10765
10766 /* Stop the hardware here if it's still thought of as "up" */
10767 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10768 iwx_stop(sc);
10769 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10770 }
10771
10772 IWX_UNLOCK(sc);
10773
10774 /* Start the VAPs, which will bring the hardware back up again */
10775 ieee80211_resume_all(ic);
10776 return (0);
10777 }
10778
10779 static void
iwx_scan_start(struct ieee80211com * ic)10780 iwx_scan_start(struct ieee80211com *ic)
10781 {
10782 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10783 struct iwx_softc *sc = ic->ic_softc;
10784 int err;
10785
10786 IWX_LOCK(sc);
10787 if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10788 err = iwx_scan(sc);
10789 else
10790 err = iwx_bgscan(ic);
10791 IWX_UNLOCK(sc);
10792 if (err)
10793 ieee80211_cancel_scan(vap);
10794
10795 return;
10796 }
10797
/* Intentionally empty: this driver does not reprogram multicast filters. */
static void
iwx_update_mcast(struct ieee80211com *ic)
{
}
10802
/*
 * Intentionally empty: scanning is offloaded to firmware
 * (IEEE80211_FEXT_SCAN_OFFLOAD), so per-channel dwell is unused.
 */
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
10807
/*
 * Intentionally empty: scanning is offloaded to firmware
 * (IEEE80211_FEXT_SCAN_OFFLOAD), so minimum-dwell handling is unused.
 */
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
10812
10813 static void
iwx_scan_end(struct ieee80211com * ic)10814 iwx_scan_end(struct ieee80211com *ic)
10815 {
10816 iwx_endscan(ic->ic_softc);
10817 }
10818
/*
 * net80211 ic_set_channel method.  Currently a no-op; the disabled
 * code below suggests channel changes would go through a PHY context
 * task — TODO confirm how channel switches are actually applied.
 */
static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}
10830
10831 static void
iwx_endscan_cb(void * arg,int pending)10832 iwx_endscan_cb(void *arg, int pending)
10833 {
10834 struct iwx_softc *sc = arg;
10835 struct ieee80211com *ic = &sc->sc_ic;
10836
10837 DPRINTF(("scan ended\n"));
10838 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10839 }
10840
/* WME parameter update hook; nothing to do here, always succeeds. */
static int
iwx_wme_update(struct ieee80211com *ic)
{
	return (0);
}
10846
10847 static int
iwx_raw_xmit(struct ieee80211_node * ni,struct mbuf * m,const struct ieee80211_bpf_params * params)10848 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10849 const struct ieee80211_bpf_params *params)
10850 {
10851 struct ieee80211com *ic = ni->ni_ic;
10852 struct iwx_softc *sc = ic->ic_softc;
10853 int err;
10854
10855 IWX_LOCK(sc);
10856 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10857 err = iwx_tx(sc, m, ni);
10858 IWX_UNLOCK(sc);
10859 return err;
10860 } else {
10861 IWX_UNLOCK(sc);
10862 return EIO;
10863 }
10864 }
10865
10866 static int
iwx_transmit(struct ieee80211com * ic,struct mbuf * m)10867 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10868 {
10869 struct iwx_softc *sc = ic->ic_softc;
10870 int error;
10871
10872 // TODO: mbufq_enqueue in iwm
10873 // TODO dequeue in iwm_start, counters, locking
10874 IWX_LOCK(sc);
10875 error = mbufq_enqueue(&sc->sc_snd, m);
10876 if (error) {
10877 IWX_UNLOCK(sc);
10878 return (error);
10879 }
10880
10881 iwx_start(sc);
10882 IWX_UNLOCK(sc);
10883 return (0);
10884 }
10885
10886 static int
iwx_ampdu_rx_start(struct ieee80211_node * ni,struct ieee80211_rx_ampdu * rap,int baparamset,int batimeout,int baseqctl)10887 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10888 int baparamset, int batimeout, int baseqctl)
10889 {
10890 struct ieee80211com *ic = ni->ni_ic;
10891 struct iwx_softc *sc = ic->ic_softc;
10892 int tid;
10893
10894 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10895 sc->ni_rx_ba[tid].ba_winstart =
10896 _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10897 sc->ni_rx_ba[tid].ba_winsize =
10898 _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10899 sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10900
10901 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10902 tid >= IWX_MAX_TID_COUNT)
10903 return ENOSPC;
10904
10905 if (sc->ba_rx.start_tidmask & (1 << tid)) {
10906 DPRINTF(("%s: tid %d already added\n", __func__, tid));
10907 return EBUSY;
10908 }
10909 DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10910
10911 sc->ba_rx.start_tidmask |= (1 << tid);
10912 DPRINTF(("%s: tid=%i\n", __func__, tid));
10913 DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10914 DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10915 DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10916
10917 taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10918
10919 // TODO:misha move to ba_task (serialize)
10920 sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10921
10922 return (0);
10923 }
10924
/* net80211 A-MPDU RX stop hook; intentionally a no-op. */
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
}
10930
10931 /**
10932 * @brief Called by net80211 to request an A-MPDU session be established.
10933 *
10934 * This is called by net80211 to see if an A-MPDU session can be established.
10935 * However, the iwx(4) firmware will take care of establishing the BA
10936 * session for us. net80211 doesn't have to send any action frames here;
10937 * it just needs to plumb up the ampdu session once the BA has been sent.
10938 *
10939 * If we return 0 here then the firmware will set up the state but net80211
10940 * will not; so it's on us to actually complete it via a call to
10941 * ieee80211_ampdu_tx_request_active_ext() .
10942 *
10943 * @param ni ieee80211_node to establish A-MPDU session for
10944 * @param tap pointer to the per-TID state struct
10945 * @param dialogtoken dialogtoken field from the BA request
10946 * @param baparamset baparamset field from the BA request
10947 * @param batimeout batimeout field from the BA request
10948 *
10949 * @returns 0 so net80211 doesn't send the BA action frame to establish A-MPDU.
10950 */
10951 static int
iwx_addba_request(struct ieee80211_node * ni,struct ieee80211_tx_ampdu * tap,int dialogtoken,int baparamset,int batimeout)10952 iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
10953 int dialogtoken, int baparamset, int batimeout)
10954 {
10955 struct iwx_softc *sc = ni->ni_ic->ic_softc;
10956 int tid;
10957
10958 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10959 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
10960 "%s: queuing AMPDU start on tid %i\n", __func__, tid);
10961
10962 /* There's no nice way right now to tell net80211 that we're in the
10963 * middle of an asynchronous ADDBA setup session. So, bump the timeout
10964 * to hz ticks, hopefully we'll get a response by then.
10965 */
10966 tap->txa_nextrequest = ticks + hz;
10967
10968 IWX_LOCK(sc);
10969 sc->ba_tx.start_tidmask |= (1 << tid);
10970 IWX_UNLOCK(sc);
10971
10972 taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
10973
10974 return (0);
10975 }
10976
10977
/*
 * net80211 ADDBA response hook.  The firmware manages BA session
 * establishment (see iwx_addba_request()), so nothing happens here.
 */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return (0);
}
10984
/* No-op: nothing to prepare before a key table update. */
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
}
10990
/* No-op: nothing to flush after a key table update. */
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
}
10996
10997 static int
iwx_key_alloc(struct ieee80211vap * vap,struct ieee80211_key * k,ieee80211_keyix * keyix,ieee80211_keyix * rxkeyix)10998 iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
10999 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
11000 {
11001
11002 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
11003 return (1);
11004 }
11005
11006 if (ieee80211_is_key_unicast(vap, k)) {
11007 *keyix = 0; /* NB: use key index 0 for ucast key */
11008 } else if (ieee80211_is_key_global(vap, k)) {
11009 *keyix = ieee80211_crypto_get_key_wepidx(vap, k);
11010 } else {
11011 net80211_vap_printf(vap, "%s: invalid crypto key type\n",
11012 __func__);
11013 return (0);
11014 }
11015 *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
11016 return (1);
11017 }
11018
11019 static int
iwx_key_set(struct ieee80211vap * vap,const struct ieee80211_key * k)11020 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
11021 {
11022 struct ieee80211com *ic = vap->iv_ic;
11023 struct iwx_softc *sc = ic->ic_softc;
11024 struct iwx_add_sta_key_cmd cmd;
11025 uint32_t status;
11026 int err;
11027 int id;
11028
11029 if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
11030 return 1;
11031 }
11032
11033 /*
11034 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
11035 * Currently we only implement station mode where 'ni' is always
11036 * ic->ic_bss so there is no need to validate arguments beyond this:
11037 */
11038
11039 memset(&cmd, 0, sizeof(cmd));
11040
11041 if (ieee80211_is_key_global(vap, k)) {
11042 id = ieee80211_crypto_get_key_wepidx(vap, k);
11043 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding group key\n",
11044 __func__);
11045 } else if (ieee80211_is_key_unicast(vap, k)) {
11046 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding key\n",
11047 __func__);
11048 id = 0; /* net80211 currently only supports unicast key 0 */
11049 } else {
11050 net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
11051 return (ENXIO);
11052 }
11053
11054 IWX_LOCK(sc);
11055
11056 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
11057 IWX_STA_KEY_FLG_WEP_KEY_MAP |
11058 ((id << IWX_STA_KEY_FLG_KEYID_POS) &
11059 IWX_STA_KEY_FLG_KEYID_MSK));
11060 if (ieee80211_is_key_global(vap, k)) {
11061 cmd.common.key_offset = 1;
11062 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
11063 } else if (ieee80211_is_key_unicast(vap, k)) {
11064 cmd.common.key_offset = 0;
11065 } else {
11066 net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
11067 IWX_UNLOCK(sc);
11068 return (ENXIO);
11069 }
11070 memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
11071 k->wk_keylen));
11072 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: key: id=%d, len=%i, key=%*D\n",
11073 __func__, id, k->wk_keylen, k->wk_keylen,
11074 (const unsigned char *) k->wk_key, "");
11075 cmd.common.sta_id = IWX_STATION_ID;
11076
11077 cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
11078 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: k->wk_keytsc=%lu\n", __func__,
11079 k->wk_keytsc);
11080
11081 status = IWX_ADD_STA_SUCCESS;
11082 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
11083 &status);
11084 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
11085 err = EIO;
11086 if (err) {
11087 net80211_vap_printf(vap,
11088 "%s: can't set wpa2 keys (error %d)\n", __func__, err);
11089 IWX_UNLOCK(sc);
11090 return err;
11091 } else
11092 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT,
11093 "%s: key added successfully\n", __func__);
11094 IWX_UNLOCK(sc);
11095 return (1);
11096 }
11097
/*
 * net80211 iv_key_delete method.
 *
 * There are no key allocations to track — only the four static WEP
 * slots or the single unicast key — so deletion requires no work.
 * Supporting IBSS/mesh/AP modes would need more here.
 */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return (1);
}
11110
/* newbus method table binding the generic device interface to iwx. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, iwx_probe),
	DEVMETHOD(device_attach, iwx_attach),
	DEVMETHOD(device_detach, iwx_detach),
	DEVMETHOD(device_suspend, iwx_suspend),
	DEVMETHOD(device_resume, iwx_resume),

	DEVMETHOD_END
};

/* Driver description: name, methods, and softc size for newbus. */
static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};

/* Register on the pci bus and export PNP info for devmatch(8). */
DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11134