1 /*-
2 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3 */
4
5 /* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
6
7 /*
8 *
9 * Copyright (c) 2025 The FreeBSD Foundation
10 *
11 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12 * under sponsorship from the FreeBSD Foundation.
13 *
14 * Permission to use, copy, modify, and distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
26 */
27
28 /*-
29 * Copyright (c) 2024 Future Crew, LLC
30 * Author: Mikhail Pchelin <misha@FreeBSD.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45 /*
46 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47 * Author: Stefan Sperling <stsp@openbsd.org>
48 * Copyright (c) 2014 Fixup Software Ltd.
49 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50 *
51 * Permission to use, copy, modify, and distribute this software for any
52 * purpose with or without fee is hereby granted, provided that the above
53 * copyright notice and this permission notice appear in all copies.
54 *
55 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62 */
63
64 /*-
65 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66 * which were used as the reference documentation for this implementation.
67 *
68 ******************************************************************************
69 *
70 * This file is provided under a dual BSD/GPLv2 license. When using or
71 * redistributing this file, you may do so under either license.
72 *
73 * GPL LICENSE SUMMARY
74 *
75 * Copyright(c) 2017 Intel Deutschland GmbH
76 * Copyright(c) 2018 - 2019 Intel Corporation
77 *
78 * This program is free software; you can redistribute it and/or modify
79 * it under the terms of version 2 of the GNU General Public License as
80 * published by the Free Software Foundation.
81 *
82 * This program is distributed in the hope that it will be useful, but
83 * WITHOUT ANY WARRANTY; without even the implied warranty of
84 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
85 * General Public License for more details.
86 *
87 * BSD LICENSE
88 *
89 * Copyright(c) 2017 Intel Deutschland GmbH
90 * Copyright(c) 2018 - 2019 Intel Corporation
91 * All rights reserved.
92 *
93 * Redistribution and use in source and binary forms, with or without
94 * modification, are permitted provided that the following conditions
95 * are met:
96 *
97 * * Redistributions of source code must retain the above copyright
98 * notice, this list of conditions and the following disclaimer.
99 * * Redistributions in binary form must reproduce the above copyright
100 * notice, this list of conditions and the following disclaimer in
101 * the documentation and/or other materials provided with the
102 * distribution.
103 * * Neither the name Intel Corporation nor the names of its
104 * contributors may be used to endorse or promote products derived
105 * from this software without specific prior written permission.
106 *
107 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118 *
119 *****************************************************************************
120 */
121
122 /*-
123 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124 *
125 * Permission to use, copy, modify, and distribute this software for any
126 * purpose with or without fee is hereby granted, provided that the above
127 * copyright notice and this permission notice appear in all copies.
128 *
129 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136 */
137
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157
158 #include <machine/bus.h>
159 #include <machine/endian.h>
160 #include <machine/resource.h>
161
162 #include <dev/pci/pcireg.h>
163 #include <dev/pci/pcivar.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_dl.h>
170 #include <net/if_media.h>
171
172 #include <netinet/in.h>
173 #include <netinet/if_ether.h>
174
175 #include <net80211/ieee80211_var.h>
176 #include <net80211/ieee80211_radiotap.h>
177 #include <net80211/ieee80211_regdomain.h>
178 #include <net80211/ieee80211_ratectl.h>
179 #include <net80211/ieee80211_vht.h>
180
/*
 * NOTE(review): appear to be Tx ring fill-level watermarks (raise/clear
 * flow control around these counts) — confirm against ring usage.
 */
int iwx_himark = 224;
int iwx_lomark = 192;
183
184 #define IWX_FBSD_RSP_V3 3
185 #define IWX_FBSD_RSP_V4 4
186
187 #define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
188 #define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)
189
190 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
191 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
192
193 #include <dev/iwx/if_iwxreg.h>
194 #include <dev/iwx/if_iwxvar.h>
195
196 #include <dev/iwx/if_iwx_debug.h>
197
198 #define PCI_CFG_RETRY_TIMEOUT 0x41
199
200 #define PCI_VENDOR_INTEL 0x8086
201 #define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
202 #define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
203 #define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
204 #define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
205 #define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
206 #define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
207 #define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
208 #define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
209 #define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
210 #define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
211 #define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
212 #define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
213 #define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
214 #define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
215 #define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
216 #define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
217 #define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
218
/*
 * PCI device IDs supported by this driver, with the marketing name
 * reported at probe time.  Matched against the PCI device ID in
 * iwx_probe().
 */
static const struct iwx_devices {
	uint16_t device;	/* PCI device ID */
	char *name;		/* human-readable adapter name */
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1, "Wi-Fi 6 AX200" },
	{ PCI_PRODUCT_INTEL_WL_22500_2, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_3, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_4, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_5, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_6, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_7, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_8, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_9, "Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_10, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_11, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_12, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_13, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_14, "Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_15, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_16, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_17, "Wi-Fi 6 AX211" },
};
241
/*
 * Channel numbers understood by non-UHB (no 6 GHz) hardware.
 * The first IWX_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels,
 * the remainder are 5 GHz channels.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
250
/*
 * Channel numbers understood by ultra-high-band (UHB) capable hardware:
 * same 2.4/5 GHz layout as iwx_nvm_channels_8000, followed by the
 * 6-7 GHz channel numbers.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
264
265 #define IWX_NUM_2GHZ_CHANNELS 14
266 #define IWX_NUM_5GHZ_CHANNELS 37
267
/*
 * Rate table mapping net80211 rates (in units of 500 kbit/s) to the
 * firmware's legacy PLCP and HT PLCP codes.  Entries with
 * IWX_RATE_INVM_PLCP / IWX_RATE_HT_SISO_MCS_INV_PLCP have no valid
 * encoding in that mode.
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kbit/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP code */
	uint8_t ht_plcp;	/* HT MCS PLCP code */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
296 #define IWX_RIDX_CCK 0
297 #define IWX_RIDX_OFDM 4
298 #define IWX_RIDX_MAX (nitems(iwx_rates)-1)
299 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
300 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
301 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
302
/* Convert an MCS index (0-15, SISO and MIMO2) into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
322
323 static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
324 static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
325 static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
326 #if 0
327 static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
328 static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
329 #endif
330 static int iwx_apply_debug_destination(struct iwx_softc *);
331 static void iwx_set_ltr(struct iwx_softc *);
332 static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
333 static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
334 const struct iwx_fw_sects *);
335 static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
336 static void iwx_ctxt_info_free_paging(struct iwx_softc *);
337 static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
338 struct iwx_context_info_dram *);
339 static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
340 static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
341 const uint8_t *, size_t);
342 static int iwx_set_default_calib(struct iwx_softc *, const void *);
343 static void iwx_fw_info_free(struct iwx_fw_info *);
344 static int iwx_read_firmware(struct iwx_softc *);
345 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
346 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
347 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
348 static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
349 static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
350 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
351 static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
352 static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
353 static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
354 static int iwx_nic_lock(struct iwx_softc *);
355 static void iwx_nic_assert_locked(struct iwx_softc *);
356 static void iwx_nic_unlock(struct iwx_softc *);
357 static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
358 uint32_t);
359 static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
360 static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
361 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
362 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
363 bus_size_t, bus_size_t);
364 static void iwx_dma_contig_free(struct iwx_dma_info *);
365 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
366 static void iwx_disable_rx_dma(struct iwx_softc *);
367 static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
368 static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
369 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
370 static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
371 static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
372 static void iwx_enable_rfkill_int(struct iwx_softc *);
373 static int iwx_check_rfkill(struct iwx_softc *);
374 static void iwx_enable_interrupts(struct iwx_softc *);
375 static void iwx_enable_fwload_interrupt(struct iwx_softc *);
376 #if 0
377 static void iwx_restore_interrupts(struct iwx_softc *);
378 #endif
379 static void iwx_disable_interrupts(struct iwx_softc *);
380 static void iwx_ict_reset(struct iwx_softc *);
381 static int iwx_set_hw_ready(struct iwx_softc *);
382 static int iwx_prepare_card_hw(struct iwx_softc *);
383 static int iwx_force_power_gating(struct iwx_softc *);
384 static void iwx_apm_config(struct iwx_softc *);
385 static int iwx_apm_init(struct iwx_softc *);
386 static void iwx_apm_stop(struct iwx_softc *);
387 static int iwx_allow_mcast(struct iwx_softc *);
388 static void iwx_init_msix_hw(struct iwx_softc *);
389 static void iwx_conf_msix_hw(struct iwx_softc *, int);
390 static int iwx_clear_persistence_bit(struct iwx_softc *);
391 static int iwx_start_hw(struct iwx_softc *);
392 static void iwx_stop_device(struct iwx_softc *);
393 static void iwx_nic_config(struct iwx_softc *);
394 static int iwx_nic_rx_init(struct iwx_softc *);
395 static int iwx_nic_init(struct iwx_softc *);
396 static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
397 static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
398 static void iwx_post_alive(struct iwx_softc *);
399 static int iwx_schedule_session_protection(struct iwx_softc *,
400 struct iwx_node *, uint32_t);
401 static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
402 static void iwx_init_channel_map(struct ieee80211com *, int, int *,
403 struct ieee80211_channel[]);
404 static int iwx_mimo_enabled(struct iwx_softc *);
405 static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
406 uint16_t);
407 static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
408 static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
409 uint16_t, uint16_t, int, int);
410 static void iwx_sta_tx_agg_start(struct iwx_softc *,
411 struct ieee80211_node *, uint8_t);
412 static void iwx_ba_rx_task(void *, int);
413 static void iwx_ba_tx_task(void *, int);
414 static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
415 static int iwx_is_valid_mac_addr(const uint8_t *);
416 static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
417 static int iwx_nvm_get(struct iwx_softc *);
418 static int iwx_load_firmware(struct iwx_softc *);
419 static int iwx_start_fw(struct iwx_softc *);
420 static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
421 static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
422 static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
423 static int iwx_load_pnvm(struct iwx_softc *);
424 static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
425 static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
426 static int iwx_load_ucode_wait_alive(struct iwx_softc *);
427 static int iwx_send_dqa_cmd(struct iwx_softc *);
428 static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
429 static int iwx_config_ltr(struct iwx_softc *);
430 static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
431 static int iwx_rx_addbuf(struct iwx_softc *, int, int);
432 static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
433 static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
434 struct iwx_rx_data *);
435 static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
436 static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
437 #if 0
438 int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
439 struct ieee80211_node *, struct ieee80211_rxinfo *);
440 #endif
441 static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
442 int, int, uint32_t, uint8_t);
443 static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
444 static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
445 struct iwx_tx_data *);
446 static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
447 static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
448 struct iwx_rx_data *);
449 static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
450 static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
451 struct iwx_rx_data *);
452 static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
453 static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
454 static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
455 struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
456 #if 0
457 static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
458 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
459 #endif
460 static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
461 uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
462 static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
463 static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
464 const void *);
465 static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
466 uint32_t *);
467 static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
468 const void *, uint32_t *);
469 static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
470 static void iwx_cmd_done(struct iwx_softc *, int, int, int);
471 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
472 static uint32_t iwx_fw_rateidx_cck(uint8_t);
473 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
474 struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
475 struct mbuf *);
476 static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
477 uint16_t, uint16_t);
478 static int iwx_tx(struct iwx_softc *, struct mbuf *,
479 struct ieee80211_node *);
480 static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
481 static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
482 static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
483 static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
484 struct iwx_beacon_filter_cmd *);
485 static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
486 int);
487 static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
488 struct iwx_mac_power_cmd *);
489 static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
490 static int iwx_power_update_device(struct iwx_softc *);
491 #if 0
492 static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
493 #endif
494 static int iwx_disable_beacon_filter(struct iwx_softc *);
495 static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
496 static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
497 static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
498 static int iwx_fill_probe_req(struct iwx_softc *,
499 struct iwx_scan_probe_req *);
500 static int iwx_config_umac_scan_reduced(struct iwx_softc *);
501 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
502 static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
503 struct iwx_scan_general_params_v10 *, int);
504 static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
505 struct iwx_scan_general_params_v10 *, uint16_t, int);
506 static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
507 struct iwx_scan_channel_params_v6 *, uint32_t, int);
508 static int iwx_umac_scan_v14(struct iwx_softc *, int);
509 static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
510 static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
511 static int iwx_rval2ridx(int);
512 static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
513 int *);
514 static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
515 struct iwx_mac_ctx_cmd *, uint32_t);
516 static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
517 struct iwx_mac_data_sta *, int);
518 static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
519 uint32_t, int);
520 static int iwx_clear_statistics(struct iwx_softc *);
521 static int iwx_scan(struct iwx_softc *);
522 static int iwx_bgscan(struct ieee80211com *);
523 static int iwx_enable_mgmt_queue(struct iwx_softc *);
524 static int iwx_disable_mgmt_queue(struct iwx_softc *);
525 static int iwx_rs_rval2idx(uint8_t);
526 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
527 int);
528 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
529 static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
530 static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
531 static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
532 static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
533 uint8_t, uint8_t);
534 static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
535 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
536 uint8_t);
537 static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
538 static int iwx_deauth(struct iwx_softc *);
539 static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
540 static int iwx_run_stop(struct iwx_softc *);
541 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
542 const uint8_t[IEEE80211_ADDR_LEN]);
543 #if 0
544 int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
545 struct ieee80211_key *);
546 void iwx_setkey_task(void *);
547 void iwx_delete_key(struct ieee80211com *,
548 struct ieee80211_node *, struct ieee80211_key *);
549 #endif
550 static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
551 static void iwx_endscan(struct iwx_softc *);
552 static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
553 struct ieee80211_node *);
554 static int iwx_sf_config(struct iwx_softc *, int);
555 static int iwx_send_bt_init_conf(struct iwx_softc *);
556 static int iwx_send_soc_conf(struct iwx_softc *);
557 static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
558 static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
559 static int iwx_init_hw(struct iwx_softc *);
560 static int iwx_init(struct iwx_softc *);
561 static void iwx_stop(struct iwx_softc *);
562 static void iwx_watchdog(void *);
563 static const char *iwx_desc_lookup(uint32_t);
564 static void iwx_nic_error(struct iwx_softc *);
565 static void iwx_dump_driver_status(struct iwx_softc *);
566 static void iwx_nic_umac_error(struct iwx_softc *);
567 static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
568 static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
569 static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
570 struct mbuf *);
571 static void iwx_notif_intr(struct iwx_softc *);
572 #if 0
573 /* XXX-THJ - I don't have hardware for this */
574 static int iwx_intr(void *);
575 #endif
576 static void iwx_intr_msix(void *);
577 static int iwx_preinit(struct iwx_softc *);
578 static void iwx_attach_hook(void *);
579 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
580 static int iwx_probe(device_t);
581 static int iwx_attach(device_t);
582 static int iwx_detach(device_t);
583
/* FreeBSD specific glue */
/* All-ones Ethernet broadcast address (ff:ff:ff:ff:ff:ff). */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zero MAC address, used as a "no address" wildcard. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
590
591 #if IWX_DEBUG
592 #define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
593 #else
594 #define DPRINTF(x) do { ; } while (0)
595 #endif
596
597 /* FreeBSD specific functions */
598 static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
599 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
600 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
601 static void iwx_vap_delete(struct ieee80211vap *);
602 static void iwx_parent(struct ieee80211com *);
603 static void iwx_scan_start(struct ieee80211com *);
604 static void iwx_scan_end(struct ieee80211com *);
605 static void iwx_update_mcast(struct ieee80211com *ic);
606 static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
607 static void iwx_scan_mindwell(struct ieee80211_scan_state *);
608 static void iwx_set_channel(struct ieee80211com *);
609 static void iwx_endscan_cb(void *, int );
610 static int iwx_wme_update(struct ieee80211com *);
611 static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
612 const struct ieee80211_bpf_params *);
613 static int iwx_transmit(struct ieee80211com *, struct mbuf *);
614 static void iwx_start(struct iwx_softc *);
615 static int iwx_ampdu_rx_start(struct ieee80211_node *,
616 struct ieee80211_rx_ampdu *, int, int, int);
617 static void iwx_ampdu_rx_stop(struct ieee80211_node *,
618 struct ieee80211_rx_ampdu *);
619 static int iwx_addba_request(struct ieee80211_node *,
620 struct ieee80211_tx_ampdu *, int, int, int);
621 static int iwx_addba_response(struct ieee80211_node *,
622 struct ieee80211_tx_ampdu *, int, int, int);
623 static void iwx_key_update_begin(struct ieee80211vap *);
624 static void iwx_key_update_end(struct ieee80211vap *);
625 static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
626 ieee80211_keyix *,ieee80211_keyix *);
627 static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
628 static int iwx_key_delete(struct ieee80211vap *,
629 const struct ieee80211_key *);
630 static int iwx_suspend(device_t);
631 static int iwx_resume(device_t);
632 static void iwx_radiotap_attach(struct iwx_softc *);
633
634 /* OpenBSD compat defines */
635 #define IEEE80211_HTOP0_SCO_SCN 0
636 #define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
637 #define IEEE80211_VHTOP0_CHAN_WIDTH_80 1
638
639 #define IEEE80211_HT_RATESET_SISO 0
640 #define IEEE80211_HT_RATESET_MIMO2 2
641
/*
 * OpenBSD-compat standard rate sets; rates are in units of 500 kbit/s
 * (e.g. 12 == 6 Mbit/s).  First struct member is the rate count.
 */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
650
651 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)652 ieee80211_has_addr4(const struct ieee80211_frame *wh)
653 {
654 return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
655 IEEE80211_FC1_DIR_DSTODS;
656 }
657
658 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)659 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
660 {
661 const struct iwx_fw_cmd_version *entry;
662 int i;
663
664 for (i = 0; i < sc->n_cmd_versions; i++) {
665 entry = &sc->cmd_versions[i];
666 if (entry->group == grp && entry->cmd == cmd)
667 return entry->cmd_ver;
668 }
669
670 return IWX_FW_CMD_VER_UNKNOWN;
671 }
672
673 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)674 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
675 {
676 const struct iwx_fw_cmd_version *entry;
677 int i;
678
679 for (i = 0; i < sc->n_cmd_versions; i++) {
680 entry = &sc->cmd_versions[i];
681 if (entry->group == grp && entry->cmd == cmd)
682 return entry->notif_ver;
683 }
684
685 return IWX_FW_CMD_VER_UNKNOWN;
686 }
687
688 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)689 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
690 {
691 const struct iwx_fw_cscheme_list *l = (const void *)data;
692
693 if (dlen < sizeof(*l) ||
694 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
695 return EINVAL;
696
697 /* we don't actually store anything for now, always use s/w crypto */
698
699 return 0;
700 }
701
702 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)703 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
704 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
705 {
706 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
707 if (err) {
708 printf("%s: could not allocate context info DMA memory\n",
709 DEVNAME(sc));
710 return err;
711 }
712
713 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
714
715 return 0;
716 }
717
718 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)719 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
720 {
721 struct iwx_self_init_dram *dram = &sc->init_dram;
722 int i;
723
724 if (!dram->paging)
725 return;
726
727 /* free paging*/
728 for (i = 0; i < dram->paging_cnt; i++)
729 iwx_dma_contig_free(&dram->paging[i]);
730
731 free(dram->paging, M_DEVBUF);
732 dram->paging_cnt = 0;
733 dram->paging = NULL;
734 }
735
736 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)737 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
738 {
739 int i = 0;
740
741 while (start < fws->fw_count &&
742 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
743 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
744 start++;
745 i++;
746 }
747
748 return i;
749 }
750
/*
 * Allocate DMA memory for all firmware sections (lmac, umac, paging)
 * and record their physical addresses in the device's context-info
 * DRAM descriptor.  The section list is laid out as:
 *   [lmac ...][separator][umac ...][separator][paging ...]
 * On error the caller is responsible for cleanup (see callers, which
 * invoke iwx_ctxt_info_free_fw_img()).
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the softc lock across the allocations below. */
	IWX_UNLOCK(sc);
	/*
	 * NOTE(review): M_NOWAIT here but M_WAITOK for dram->paging below
	 * is inconsistent; since the lock is dropped for both, sleeping
	 * would appear safe in both cases -- confirm intent.
	 */
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	/* NOTE(review): with M_WAITOK this cannot return NULL, so the
	 * error check below is effectively dead code. */
	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
848
/*
 * Format a firmware version string into buf.  Starting with major
 * version 35 the Linux driver prints the minor version as zero-padded
 * hexadecimal; mirror that so version strings match Linux's.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	if (major < 35) {
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
		return;
	}
	snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
}
#if 0
/*
 * Firmware debug-monitor buffer allocation, currently compiled out
 * (only needed by the also-disabled debug destination code below).
 */
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	/* Try progressively smaller power-of-two sizes until one fits. */
	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	/*
	 * NOTE(review): if max_power < min_power the loop body never runs
	 * and 'err' is read uninitialized here.  The only caller passes
	 * min_power == 11 and guarantees max_power >= 11, so this cannot
	 * trigger today -- but initialize 'err' if this is ever re-enabled.
	 */
	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

/*
 * Translate the TLV-provided size exponent into an absolute power of
 * two (buffer size 2^max_power bytes, capped at 2^26) and allocate the
 * monitor buffer.
 */
static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif
925
/*
 * Apply the firmware debug destination TLV (register writes that set up
 * firmware debug logging).  The whole body is currently compiled out;
 * the function is kept as a stub returning success.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	/* base_reg/end_reg are converted to host order here. */
	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Replay the register-operation list from the TLV. */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			/* NOTE(review): returning here leaves the NIC lock
			 * held (no iwx_nic_unlock) -- fix if re-enabled. */
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/* NOTE(review): base_reg was already le32toh()'d above, so
		 * this second le32toh() double-swaps on big-endian hosts
		 * (end_reg below is used correctly) -- fix if re-enabled. */
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1012
1013 static void
iwx_set_ltr(struct iwx_softc * sc)1014 iwx_set_ltr(struct iwx_softc *sc)
1015 {
1016 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1017 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1018 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1019 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1020 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1021 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1022 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1023 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1024 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1025 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1026 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1027
1028 /*
1029 * To workaround hardware latency issues during the boot process,
1030 * initialize the LTR to ~250 usec (see ltr_val above).
1031 * The firmware initializes this again later (to a smaller value).
1032 */
1033 if (!sc->sc_integrated) {
1034 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1035 } else if (sc->sc_integrated &&
1036 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1037 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1038 IWX_HPM_MAC_LRT_ENABLE_ALL);
1039 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1040 }
1041 }
1042
/*
 * Fill in and activate the "context info" structure that pre-AX210
 * devices use to self-load firmware: record RX/TX ring and firmware
 * section DMA addresses, hand the structure's address to the device,
 * and kick the firmware self-load.
 *
 * Returns 0 on success; on failure the firmware section DMA memory is
 * released before returning an errno.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* The encoded circular-buffer size must stay below 0xF. */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	     IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		/* NOTE(review): leftover "#if 1" -- always taken; note that
		 * iwx_apply_debug_destination() is currently a stub. */
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1123
/*
 * Gen3 (AX210+) variant of iwx_ctxt_info_init(): fills in the
 * context-info-gen3 structure plus the PRPH scratch/info areas, copies
 * the image loader (IML) from the firmware file into DMA memory, points
 * the device at all of it, and kicks the firmware self-load.
 *
 * Returns 0 on success; on failure any allocated IML DMA memory and the
 * firmware section DMA memory are released before returning an errno.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 devices cannot boot without an image loader section. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	/* Fill in the PRPH scratch control/config area. */
	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill in the gen3 context info: PRPH area addresses, RX/TX ring
	 * ("mtr"/"mcr") base addresses and sizes, and the tail-index
	 * arrays placed at fixed offsets within the PRPH info page.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	/* Copy the image loader into its DMA buffer. */
	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/* 64-bit addresses are written as two 32-bit halves. */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1228
1229 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1230 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1231 {
1232 struct iwx_self_init_dram *dram = &sc->init_dram;
1233 int i;
1234
1235 if (!dram->fw)
1236 return;
1237
1238 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1239 iwx_dma_contig_free(&dram->fw[i]);
1240
1241 free(dram->fw, M_DEVBUF);
1242 dram->lmac_cnt = 0;
1243 dram->umac_cnt = 0;
1244 dram->fw = NULL;
1245 }
1246
1247 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1248 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1249 const uint8_t *data, size_t dlen)
1250 {
1251 struct iwx_fw_sects *fws;
1252 struct iwx_fw_onesect *fwone;
1253
1254 if (type >= IWX_UCODE_TYPE_MAX)
1255 return EINVAL;
1256 if (dlen < sizeof(uint32_t))
1257 return EINVAL;
1258
1259 fws = &sc->sc_fw.fw_sects[type];
1260 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1261 "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1262 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1263 return EINVAL;
1264
1265 fwone = &fws->fw_sect[fws->fw_count];
1266
1267 /* first 32bit are device load offset */
1268 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1269
1270 /* rest is data */
1271 fwone->fws_data = data + sizeof(uint32_t);
1272 fwone->fws_len = dlen - sizeof(uint32_t);
1273
1274 fws->fw_count++;
1275 fws->fw_totlen += fwone->fws_len;
1276
1277 return 0;
1278 }
1279
#define IWX_DEFAULT_SCAN_CHANNELS 40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */

/* Wire layout of the IWX_UCODE_TLV_DEF_CALIB payload (little-endian). */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;	/* which ucode image these defaults apply to */
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1288
1289 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1290 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1291 {
1292 const struct iwx_tlv_calib_data *def_calib = data;
1293 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1294
1295 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1296 return EINVAL;
1297
1298 sc->sc_default_calib[ucode_type].flow_trigger =
1299 def_calib->calib.flow_trigger;
1300 sc->sc_default_calib[ucode_type].event_trigger =
1301 def_calib->calib.event_trigger;
1302
1303 return 0;
1304 }
1305
1306 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1307 iwx_fw_info_free(struct iwx_fw_info *fw)
1308 {
1309 free(fw->fw_rawdata, M_DEVBUF);
1310 fw->fw_rawdata = NULL;
1311 fw->fw_rawsize = 0;
1312 /* don't touch fw->fw_status */
1313 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1314 free(fw->iml, M_DEVBUF);
1315 fw->iml = NULL;
1316 fw->iml_len = 0;
1317 }
1318
1319 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1320
/*
 * Load the firmware file named by sc->sc_fwname via firmware(9) and
 * parse its TLV-format contents: capability flags, API bits, firmware
 * sections, calibration defaults, debug destinations, command versions,
 * and the image loader.  Results are stored in sc->sc_fw and related
 * softc fields.
 *
 * Returns 0 on success (fw_status set to IWX_FW_STATUS_DONE) or an
 * errno on failure (fw_status reset to IWX_FW_STATUS_NONE and any
 * partially-parsed state freed).  A second call after success returns
 * 0 immediately.
 */
static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);


	/* Reset all capability state before (re-)parsing. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	/* A TLV-style image starts with a zero word followed by magic. */
	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	/* Provisional version string; IWX_UCODE_TLV_FW_VERSION, if
	 * present, overrides this below. */
	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	/* Walk the TLV stream: header, payload, 4-byte padding. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug? Observations from
			 * current firmware file:
			 * 1) TLV_PAN is parsed first
			 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Only validated; the value is not stored. */
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Each TLV carries one 32-bit word of API flags. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			/*
			 * NOTE(review): unlike the API case above, this jumps
			 * to parse_out with err still 0, so parsing stops but
			 * the function reports success.  If the intent is to
			 * tolerate newer firmware's extra capability words,
			 * this should probably be "break" instead -- confirm.
			 */
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			/* Only the length is validated here. */
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			/* Overrides the header-derived version string. */
			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			/* Only version-0 debug destination TLVs are known. */
			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			/* First destination TLV wins. */
			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (const void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			/* Number of register ops = trailing payload bytes
			 * divided by the per-op size. */
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg);
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;

			/* Requires a prior destination TLV; duplicates and
			 * out-of-range ids are silently ignored. */
			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "Found debug configuration: %d\n", conf->id);
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			const struct iwx_umac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
			    le32toh(dbg_ptrs->error_info_addr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			const struct iwx_lmac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
			    le32toh(dbg_ptrs->error_event_table_ptr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			/* Replace any previously seen image loader. */
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF);
				fw->iml_len = 0;
			}
			/* NOTE(review): with M_WAITOK this malloc cannot
			 * return NULL; the ENOMEM branch is dead code. */
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAITOK | M_ZERO);
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			/* Round a ragged payload length down to a whole
			 * number of table entries. */
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
		case IWX_UCODE_TLV_FW_NUM_BEACONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
		case IWX_UCODE_TLV_CURRENT_PC:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
		case 0x101:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
		case 0x100000c:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
		case 69:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	/* Reaching here means every TLV was handled without error. */
	KASSERT(err == 0, ("unhandled fw parse error"));

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	return err;
}
1761
1762 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1763 iwx_prph_addr_mask(struct iwx_softc *sc)
1764 {
1765 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1766 return 0x00ffffff;
1767 else
1768 return 0x000fffff;
1769 }
1770
/*
 * Read a periphery (PRPH) register without taking the NIC lock.
 * The caller must already hold the lock or otherwise guarantee the
 * device is awake.
 */
static uint32_t
iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	/*
	 * Indirect access: latch the target address (plus the (3 << 24)
	 * control bits required by the HBUS protocol), then read the
	 * data port.
	 */
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}
1779
/*
 * Locked variant of iwx_read_prph_unlocked(); asserts that the caller
 * holds an iwx_nic_lock() reference.
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}
1786
/*
 * Write a periphery (PRPH) register without taking the NIC lock.
 * The caller must already hold the lock or otherwise guarantee the
 * device is awake.
 */
static void
iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	uint32_t mask = iwx_prph_addr_mask(sc);
	/* Indirect access: latch address with control bits, then write data. */
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}
1795
/*
 * Locked variant of iwx_write_prph_unlocked(); asserts that the caller
 * holds an iwx_nic_lock() reference.
 */
static void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}
1802
/* Read a UMAC PRPH register (device-specific offset applied). */
static uint32_t
iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
{
	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
}
1808
/* Write a UMAC PRPH register (device-specific offset applied). */
static void
iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
}
1814
1815 static int
iwx_read_mem(struct iwx_softc * sc,uint32_t addr,void * buf,int dwords)1816 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1817 {
1818 int offs, err = 0;
1819 uint32_t *vals = buf;
1820
1821 if (iwx_nic_lock(sc)) {
1822 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1823 for (offs = 0; offs < dwords; offs++)
1824 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1825 iwx_nic_unlock(sc);
1826 } else {
1827 err = EBUSY;
1828 }
1829 return err;
1830 }
1831
/*
 * Busy-wait until (reg & mask) == (bits & mask), polling every 10us.
 *
 * 'timo' is the timeout budget in microseconds.  Returns 1 when the
 * condition is met, 0 on timeout.  The register is always read at
 * least once before the timeout is considered.
 */
static int
iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}
1847
/*
 * Request MAC access (keeps the device awake) so that PRPH/SRAM
 * accesses are valid.  The lock is reference-counted: each successful
 * call must be balanced by iwx_nic_unlock().
 *
 * Returns 1 on success, 0 if the device failed to wake up.
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Short settle time before polling for wakeup. */
	DELAY(2);

	/* Wait up to 150ms for the MAC clock to become ready. */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1873
/* Panic if the caller does not hold an iwx_nic_lock() reference. */
static void
iwx_nic_assert_locked(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}
1880
1881 static void
iwx_nic_unlock(struct iwx_softc * sc)1882 iwx_nic_unlock(struct iwx_softc *sc)
1883 {
1884 if (sc->sc_nic_locks > 0) {
1885 if (--sc->sc_nic_locks == 0)
1886 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1887 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1888 } else
1889 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1890 }
1891
1892 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1893 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1894 uint32_t mask)
1895 {
1896 uint32_t val;
1897
1898 if (iwx_nic_lock(sc)) {
1899 val = iwx_read_prph(sc, reg) & mask;
1900 val |= bits;
1901 iwx_write_prph(sc, reg, val);
1902 iwx_nic_unlock(sc);
1903 return 0;
1904 }
1905 return EBUSY;
1906 }
1907
/* Set 'bits' in a PRPH register, preserving all other bits. */
static int
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}
1913
/* Clear 'bits' in a PRPH register, preserving all other bits. */
static int
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1919
1920 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1921 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1922 {
1923 if (error != 0)
1924 return;
1925 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1926 *(bus_addr_t *)arg = segs[0].ds_addr;
1927 }
1928
/*
 * Allocate a physically contiguous, 32-bit addressable DMA area of
 * 'size' bytes with the given alignment, zeroed, and record its
 * kernel virtual and bus addresses in 'dma'.
 *
 * On failure all partially created resources are released via
 * iwx_dma_contig_free() and an errno is returned.
 */
static int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single segment, below 4GB, so the device can address it. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* Resolve the bus address via the load callback. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwx_dma_contig_free(dma);
	return error;
}
1967
/*
 * Release a DMA area created by iwx_dma_contig_alloc().  Safe to call
 * on a partially initialized or already-freed 'dma'.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1983
1984 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1985 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986 {
1987 bus_size_t size;
1988 int i, err;
1989
1990 ring->cur = 0;
1991
1992 /* Allocate RX descriptors (256-byte aligned). */
1993 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1994 size = sizeof(struct iwx_rx_transfer_desc);
1995 else
1996 size = sizeof(uint64_t);
1997 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1998 size * IWX_RX_MQ_RING_COUNT, 256);
1999 if (err) {
2000 device_printf(sc->sc_dev,
2001 "could not allocate RX ring DMA memory\n");
2002 goto fail;
2003 }
2004 ring->desc = ring->free_desc_dma.vaddr;
2005
2006 /* Allocate RX status area (16-byte aligned). */
2007 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2008 size = sizeof(uint16_t);
2009 else
2010 size = sizeof(*ring->stat);
2011 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2012 if (err) {
2013 device_printf(sc->sc_dev,
2014 "could not allocate RX status DMA memory\n");
2015 goto fail;
2016 }
2017 ring->stat = ring->stat_dma.vaddr;
2018
2019 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2020 size = sizeof(struct iwx_rx_completion_desc);
2021 else
2022 size = sizeof(uint32_t);
2023 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2024 size * IWX_RX_MQ_RING_COUNT, 256);
2025 if (err) {
2026 device_printf(sc->sc_dev,
2027 "could not allocate RX ring DMA memory\n");
2028 goto fail;
2029 }
2030
2031 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2032 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2033 0, NULL, NULL, &ring->data_dmat);
2034
2035 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2036 struct iwx_rx_data *data = &ring->data[i];
2037
2038 memset(data, 0, sizeof(*data));
2039 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2040 if (err) {
2041 device_printf(sc->sc_dev,
2042 "could not create RX buf DMA map\n");
2043 goto fail;
2044 }
2045
2046 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2047 if (err)
2048 goto fail;
2049 }
2050 return 0;
2051
2052 fail: iwx_free_rx_ring(sc, ring);
2053 return err;
2054 }
2055
/*
 * Stop the RX DMA engine (RFH) and wait up to ~10ms for it to go
 * idle.  Register layout differs between pre-AX210 and AX210+
 * devices.  Silently does nothing if the NIC lock cannot be taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		/* Clear the RFH DMA configuration to stop the engine. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* Poll for the idle flag (1000 x 10us). */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
2081
2082 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2083 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2084 {
2085 ring->cur = 0;
2086 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2087 BUS_DMASYNC_PREWRITE);
2088 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2089 uint16_t *status = sc->rxq.stat_dma.vaddr;
2090 *status = 0;
2091 } else
2092 memset(ring->stat, 0, sizeof(*ring->stat));
2093 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2094 BUS_DMASYNC_POSTWRITE);
2095
2096 }
2097
2098 static void
iwx_free_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2099 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2100 {
2101 int i;
2102
2103 iwx_dma_contig_free(&ring->free_desc_dma);
2104 iwx_dma_contig_free(&ring->stat_dma);
2105 iwx_dma_contig_free(&ring->used_desc_dma);
2106
2107 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2108 struct iwx_rx_data *data = &ring->data[i];
2109 if (data->m != NULL) {
2110 bus_dmamap_sync(ring->data_dmat, data->map,
2111 BUS_DMASYNC_POSTREAD);
2112 bus_dmamap_unload(ring->data_dmat, data->map);
2113 m_freem(data->m);
2114 data->m = NULL;
2115 }
2116 if (data->map != NULL) {
2117 bus_dmamap_destroy(ring->data_dmat, data->map);
2118 data->map = NULL;
2119 }
2120 }
2121 if (ring->data_dmat != NULL) {
2122 bus_dma_tag_destroy(ring->data_dmat);
2123 ring->data_dmat = NULL;
2124 }
2125 }
2126
2127 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2128 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2129 {
2130 bus_addr_t paddr;
2131 bus_size_t size;
2132 int i, err;
2133 size_t bc_tbl_size;
2134 bus_size_t bc_align;
2135 size_t mapsize;
2136
2137 ring->qid = qid;
2138 ring->queued = 0;
2139 ring->cur = 0;
2140 ring->cur_hw = 0;
2141 ring->tail = 0;
2142 ring->tail_hw = 0;
2143
2144 /* Allocate TX descriptors (256-byte aligned). */
2145 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2146 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2147 if (err) {
2148 device_printf(sc->sc_dev,
2149 "could not allocate TX ring DMA memory\n");
2150 goto fail;
2151 }
2152 ring->desc = ring->desc_dma.vaddr;
2153
2154 /*
2155 * The hardware supports up to 512 Tx rings which is more
2156 * than we currently need.
2157 *
2158 * In DQA mode we use 1 command queue + 1 default queue for
2159 * management, control, and non-QoS data frames.
2160 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2161 *
2162 * Tx aggregation requires additional queues, one queue per TID for
2163 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2164 * Firmware may assign its own internal IDs for these queues
2165 * depending on which TID gets aggregation enabled first.
2166 * The driver maintains a table mapping driver-side queue IDs
2167 * to firmware-side queue IDs.
2168 */
2169
2170 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2171 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2172 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2173 bc_align = 128;
2174 } else {
2175 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2176 bc_align = 64;
2177 }
2178 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2179 bc_align);
2180 if (err) {
2181 device_printf(sc->sc_dev,
2182 "could not allocate byte count table DMA memory\n");
2183 goto fail;
2184 }
2185
2186 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2187 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2188 IWX_FIRST_TB_SIZE_ALIGN);
2189 if (err) {
2190 device_printf(sc->sc_dev,
2191 "could not allocate cmd DMA memory\n");
2192 goto fail;
2193 }
2194 ring->cmd = ring->cmd_dma.vaddr;
2195
2196 /* FW commands may require more mapped space than packets. */
2197 if (qid == IWX_DQA_CMD_QUEUE)
2198 mapsize = (sizeof(struct iwx_cmd_header) +
2199 IWX_MAX_CMD_PAYLOAD_SIZE);
2200 else
2201 mapsize = MCLBYTES;
2202 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2203 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2204 mapsize, 0, NULL, NULL, &ring->data_dmat);
2205
2206 paddr = ring->cmd_dma.paddr;
2207 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2208 struct iwx_tx_data *data = &ring->data[i];
2209
2210 data->cmd_paddr = paddr;
2211 paddr += sizeof(struct iwx_device_cmd);
2212
2213 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2214 if (err) {
2215 device_printf(sc->sc_dev,
2216 "could not create TX buf DMA map\n");
2217 goto fail;
2218 }
2219 }
2220 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2221 return 0;
2222
2223 fail:
2224 return err;
2225 }
2226
2227 static void
iwx_reset_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2228 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2229 {
2230 int i;
2231
2232 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2233 struct iwx_tx_data *data = &ring->data[i];
2234
2235 if (data->m != NULL) {
2236 bus_dmamap_sync(ring->data_dmat, data->map,
2237 BUS_DMASYNC_POSTWRITE);
2238 bus_dmamap_unload(ring->data_dmat, data->map);
2239 m_freem(data->m);
2240 data->m = NULL;
2241 }
2242 }
2243
2244 /* Clear byte count table. */
2245 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2246
2247 /* Clear TX descriptors. */
2248 memset(ring->desc, 0, ring->desc_dma.size);
2249 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2250 BUS_DMASYNC_PREWRITE);
2251 sc->qfullmsk &= ~(1 << ring->qid);
2252 sc->qenablemsk &= ~(1 << ring->qid);
2253 for (i = 0; i < nitems(sc->aggqid); i++) {
2254 if (sc->aggqid[i] == ring->qid) {
2255 sc->aggqid[i] = 0;
2256 break;
2257 }
2258 }
2259 ring->queued = 0;
2260 ring->cur = 0;
2261 ring->cur_hw = 0;
2262 ring->tail = 0;
2263 ring->tail_hw = 0;
2264 ring->tid = 0;
2265 }
2266
2267 static void
iwx_free_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2268 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2269 {
2270 int i;
2271
2272 iwx_dma_contig_free(&ring->desc_dma);
2273 iwx_dma_contig_free(&ring->cmd_dma);
2274 iwx_dma_contig_free(&ring->bc_tbl);
2275
2276 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2277 struct iwx_tx_data *data = &ring->data[i];
2278
2279 if (data->m != NULL) {
2280 bus_dmamap_sync(ring->data_dmat, data->map,
2281 BUS_DMASYNC_POSTWRITE);
2282 bus_dmamap_unload(ring->data_dmat, data->map);
2283 m_freem(data->m);
2284 data->m = NULL;
2285 }
2286 if (data->map != NULL) {
2287 bus_dmamap_destroy(ring->data_dmat, data->map);
2288 data->map = NULL;
2289 }
2290 }
2291 if (ring->data_dmat != NULL) {
2292 bus_dma_tag_destroy(ring->data_dmat);
2293 ring->data_dmat = NULL;
2294 }
2295 }
2296
/*
 * Unmask only the RF-kill interrupt (legacy INT mask or MSI-X cause
 * registers, depending on mode) and allow RF-kill to wake the link.
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* In MSI-X mode a cause is enabled when its mask bit is 0. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2314
2315 static int
iwx_check_rfkill(struct iwx_softc * sc)2316 iwx_check_rfkill(struct iwx_softc *sc)
2317 {
2318 uint32_t v;
2319 int rv;
2320
2321 /*
2322 * "documentation" is not really helpful here:
2323 * 27: HW_RF_KILL_SW
2324 * Indicates state of (platform's) hardware RF-Kill switch
2325 *
2326 * But apparently when it's off, it's on ...
2327 */
2328 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2329 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2330 if (rv) {
2331 sc->sc_flags |= IWX_FLAG_RFKILL;
2332 } else {
2333 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2334 }
2335
2336 return rv;
2337 }
2338
/*
 * Enable the full set of device interrupts, in either legacy-mask or
 * MSI-X cause-register form.
 */
static void
iwx_enable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
2358
/*
 * Enable only the interrupts needed during firmware load: the ALIVE
 * notification plus FH RX (the ALIVE arrives via an RX buffer).
 */
static void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
2378
#if 0
/* Currently unused: re-arm the previously saved legacy interrupt mask. */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2386
/*
 * Mask all device interrupts.  In legacy mode, also acknowledge any
 * pending interrupt status so nothing fires once re-enabled.
 */
static void
iwx_disable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWX_WRITE(sc, IWX_CSR_INT, ~0);
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
	} else {
		/* Restoring the init masks leaves every cause masked. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}
}
2403
/*
 * Reset the Interrupt Cause Table (ICT): zero the table, point the
 * device at its DMA address, and switch the driver to ICT mode with
 * interrupts re-enabled.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Acknowledge everything pending, then unmask. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2425
#define IWX_HW_READY_TIMEOUT 50
/*
 * Request NIC-ready and poll (up to IWX_HW_READY_TIMEOUT us) for the
 * device to acknowledge.  On success, tell the device the OS is alive.
 * Returns nonzero when the hardware reported ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2447
/*
 * Bring the card to a state where iwx_set_hw_ready() succeeds.
 * If not immediately ready, repeatedly assert the PREPARE bit and
 * re-poll.  Returns 0 on success or ETIMEDOUT.
 *
 * NOTE(review): 't' is not reset between outer iterations, so the
 * inner loop only consumes its full 150ms budget on the first
 * attempt and runs once per attempt thereafter — presumably
 * intentional (total budget), but worth confirming.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2477
/*
 * Toggle power-gating control bits in the HPM config register to
 * force the device through a power-gating cycle (workaround sequence
 * on 22000-family hardware).  Returns 0 or an errno from the PRPH
 * accessors.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2498
/*
 * Configure PCIe power-management interaction: disable L0s (unstable
 * and unsupported on these devices) and record whether ASPM L0s and
 * LTR are enabled by the platform for later power decisions.
 */
static void
iwx_apm_config(struct iwx_softc *sc)
{
	uint16_t lctl, cap;
	int pcie_ptr;
	int error;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);

	error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
	if (error != 0) {
		printf("can't fill pcie_ptr\n");
		return;
	}

	lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
	    sizeof(lctl));
#define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
	/* PM support is only advertised when the platform keeps L0s off. */
	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
#define PCI_PCIE_DCSR2 0x28
	/* Device Control 2: check the LTR-enable bit. */
	cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
	    sizeof(lctl));
#define PCI_PCIE_DCSR2_LTREN 0x00000400
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
#define PCI_PCIE_LCSR_ASPM_L1 0x00000002
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
#undef PCI_PCIE_LCSR_ASPM_L0S
#undef PCI_PCIE_DCSR2
#undef PCI_PCIE_DCSR2_LTREN
#undef PCI_PCIE_LCSR_ASPM_L1
}
2538
2539 /*
2540 * Start up NIC's basic functionality after it has been reset
2541 * e.g. after platform boot or shutdown.
2542 * NOTE: This does not load uCode nor start the embedded processor
2543 */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2592
/*
 * Power down the adapter: prepare for shutdown, stop bus-master DMA
 * (waiting up to 100us), and move the device back to the
 * uninitialized (D0U*) power state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for bus master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2621
/*
 * Program the MSI-X hardware configuration and capture the reset-time
 * cause masks; these "init masks" are what enable/disable routines
 * restore later.
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* Mask registers read inverted: 0 bits are enabled causes. */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2635
/*
 * Configure interrupt delivery hardware.  In MSI mode, tell the
 * device (which defaults to MSI-X) to use MSI.  In MSI-X mode, route
 * every RX queue and every non-RX cause to vector 0 and unmask them.
 *
 * 'stopped' is nonzero when the device is halted, in which case the
 * PRPH write selecting MSI/MSI-X mode is skipped.
 */
static void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	/* All causes are funneled into a single vector. */
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
		    IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2723
/*
 * Clear the persistence bit in HPM_DEBUG if it is set, unless the
 * register is write-protected (in which case EPERM is returned).
 *
 * NOTE(review): 0xa5a5a5a0 appears to be the "device unreachable"
 * read-back pattern, in which case the register is skipped — confirm
 * against upstream.
 */
static int
iwx_clear_persistence_bit(struct iwx_softc *sc)
{
	uint32_t hpm, wprot;

	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
		if (wprot & IWX_PREG_WFPM_ACCESS) {
			printf("%s: cannot clear persistence bit\n",
			    DEVNAME(sc));
			return EPERM;
		}
		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
		    hpm & ~IWX_PERSISTENCE_BIT);
	}

	return 0;
}
2743
/*
 * Full hardware bring-up: prepare the card, reset it (with the
 * 22000-integrated power-gating workaround), run APM init, program
 * MSI-X, and arm the RF-kill interrupt.  Returns 0 or an errno.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		/* Wake the device and wait for its clock before gating. */
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2796
/*
 * Stop the device and put it into a low-power state.
 *
 * Disables interrupts and Rx/Tx DMA, releases the MAC-access request,
 * stops the APM and resets the on-board processor.  The MSI-X IVAR
 * table (erased by the reset) is re-programmed so the RF-kill
 * interrupt can still be delivered.  Finally frees firmware paging
 * and PNVM DMA memory.
 */
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging and PNVM buffers. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2857
2858 static void
iwx_nic_config(struct iwx_softc * sc)2859 iwx_nic_config(struct iwx_softc *sc)
2860 {
2861 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2862 uint32_t mask, val, reg_val = 0;
2863
2864 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2865 IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2866 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2867 IWX_FW_PHY_CFG_RADIO_STEP_POS;
2868 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2869 IWX_FW_PHY_CFG_RADIO_DASH_POS;
2870
2871 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2872 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2873 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2874 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2875
2876 /* radio configuration */
2877 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2878 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2879 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2880
2881 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2882 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2883 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2884 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2885 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2886 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2887 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2888
2889 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2890 val &= ~mask;
2891 val |= reg_val;
2892 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2893 }
2894
2895 static int
iwx_nic_rx_init(struct iwx_softc * sc)2896 iwx_nic_rx_init(struct iwx_softc *sc)
2897 {
2898 IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2899
2900 /*
2901 * We don't configure the RFH; the firmware will do that.
2902 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2903 */
2904 return 0;
2905 }
2906
2907 static int
iwx_nic_init(struct iwx_softc * sc)2908 iwx_nic_init(struct iwx_softc *sc)
2909 {
2910 int err;
2911
2912 iwx_apm_init(sc);
2913 if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2914 iwx_nic_config(sc);
2915
2916 err = iwx_nic_rx_init(sc);
2917 if (err)
2918 return err;
2919
2920 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2921
2922 return 0;
2923 }
2924
/*
 * Map ieee80211_edca_ac categories to firmware Tx FIFO.
 * Index order is BE, BK, VI, VO -- NOTE(review): confirm this matches
 * the access-category numbering used at the call sites.
 */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2932
/*
 * Enable (allocate and configure) firmware Tx queue 'qid' for station
 * 'sta_id' / TID 'tid' with 'num_slots' TFD slots.
 *
 * Uses the legacy SCD_QUEUE_CFG layout (command version 0/unknown) or
 * SCD_QUEUE_CONFIG_CMD v3, depending on what the firmware advertises.
 * The firmware replies with the queue number and write pointer it
 * assigned; since this driver does not support dynamic queue IDs, any
 * mismatch with the requested qid (or with the ring's hw pointer) is
 * treated as an error.
 *
 * Returns 0 on success, EIO on a firmware-level failure or mismatch,
 * ENOTSUP for an unsupported command version, or an errno from
 * command submission.
 */
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy layout: pass DMA addresses of ring and BC table. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 layout: explicit ADD operation with a station mask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Response must be exactly one iwx_tx_queue_cfg_rsp. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our (just reset) ring. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3025
/*
 * Disable firmware Tx queue 'qid' for station 'sta_id' / TID 'tid'.
 *
 * Mirrors iwx_enable_txq(): the legacy layout clears the "queue
 * enabled" flag and zeroes the DMA addresses, while the v3 layout
 * sends an explicit REMOVE operation.  On success the queue is
 * removed from sc->qenablemsk and its ring is reset.
 *
 * Returns 0 on success, EIO on firmware-level failure, ENOTSUP for an
 * unsupported command version, or an errno from command submission.
 */
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3084
3085 static void
iwx_post_alive(struct iwx_softc * sc)3086 iwx_post_alive(struct iwx_softc *sc)
3087 {
3088 int txcmd_ver;
3089
3090 iwx_ict_reset(sc);
3091
3092 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3093 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3094 sc->sc_rate_n_flags_version = 2;
3095 else
3096 sc->sc_rate_n_flags_version = 1;
3097
3098 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3099 }
3100
3101 static int
iwx_schedule_session_protection(struct iwx_softc * sc,struct iwx_node * in,uint32_t duration_tu)3102 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
3103 uint32_t duration_tu)
3104 {
3105
3106 struct iwx_session_prot_cmd cmd = {
3107 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3108 in->in_color)),
3109 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
3110 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3111 .duration_tu = htole32(duration_tu),
3112 };
3113 uint32_t cmd_id;
3114 int err;
3115
3116 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3117 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3118 if (!err)
3119 sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3120 return err;
3121 }
3122
3123 static void
iwx_unprotect_session(struct iwx_softc * sc,struct iwx_node * in)3124 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3125 {
3126 struct iwx_session_prot_cmd cmd = {
3127 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3128 in->in_color)),
3129 .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3130 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3131 .duration_tu = 0,
3132 };
3133 uint32_t cmd_id;
3134
3135 /* Do nothing if the time event has already ended. */
3136 if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3137 return;
3138
3139 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3140 if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3141 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3142 }
3143
3144 /*
3145 * NVM read access and content parsing. We do not support
3146 * external NVM or writing NVM.
3147 */
3148
3149 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3150 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3151 {
3152 uint8_t tx_ant;
3153
3154 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3155 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3156
3157 if (sc->sc_nvm.valid_tx_ant)
3158 tx_ant &= sc->sc_nvm.valid_tx_ant;
3159
3160 return tx_ant;
3161 }
3162
3163 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3164 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3165 {
3166 uint8_t rx_ant;
3167
3168 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3169 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3170
3171 if (sc->sc_nvm.valid_rx_ant)
3172 rx_ant &= sc->sc_nvm.valid_rx_ant;
3173
3174 return rx_ant;
3175 }
3176
3177 static void
iwx_init_channel_map(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])3178 iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
3179 struct ieee80211_channel chans[])
3180 {
3181 struct iwx_softc *sc = ic->ic_softc;
3182 struct iwx_nvm_data *data = &sc->sc_nvm;
3183 uint8_t bands[IEEE80211_MODE_BYTES];
3184 const uint8_t *nvm_channels;
3185 uint32_t ch_flags;
3186 int ch_idx, nchan;
3187
3188 if (sc->sc_uhb_supported) {
3189 nchan = nitems(iwx_nvm_channels_uhb);
3190 nvm_channels = iwx_nvm_channels_uhb;
3191 } else {
3192 nchan = nitems(iwx_nvm_channels_8000);
3193 nvm_channels = iwx_nvm_channels_8000;
3194 }
3195
3196 /* 2.4Ghz; 1-13: 11b/g channels. */
3197 if (!data->sku_cap_band_24GHz_enable)
3198 goto band_5;
3199
3200 memset(bands, 0, sizeof(bands));
3201 setbit(bands, IEEE80211_MODE_11B);
3202 setbit(bands, IEEE80211_MODE_11G);
3203 setbit(bands, IEEE80211_MODE_11NG);
3204 for (ch_idx = 0;
3205 ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
3206 ch_idx++) {
3207
3208 uint32_t nflags = 0;
3209 int cflags = 0;
3210
3211 if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
3212 ch_flags = le32_to_cpup(
3213 sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3214 } else {
3215 ch_flags = le16_to_cpup(
3216 sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3217 }
3218 if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3219 continue;
3220
3221 if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3222 cflags |= NET80211_CBW_FLAG_HT40;
3223
3224 /* XXX-BZ nflags RADAR/DFS/INDOOR */
3225
3226 /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3227 nvm_channels[ch_idx],
3228 ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
3229 /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3230 nflags, bands, cflags);
3231 }
3232
3233 band_5:
3234 /* 5Ghz */
3235 if (!data->sku_cap_band_52GHz_enable)
3236 goto band_6;
3237
3238
3239 memset(bands, 0, sizeof(bands));
3240 setbit(bands, IEEE80211_MODE_11A);
3241 setbit(bands, IEEE80211_MODE_11NA);
3242 setbit(bands, IEEE80211_MODE_VHT_5GHZ);
3243
3244 for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
3245 ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
3246 ch_idx++) {
3247 uint32_t nflags = 0;
3248 int cflags = 0;
3249
3250 if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
3251 ch_flags = le32_to_cpup(
3252 sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3253 else
3254 ch_flags = le16_to_cpup(
3255 sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3256
3257 if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3258 continue;
3259
3260 if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3261 cflags |= NET80211_CBW_FLAG_HT40;
3262 if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
3263 cflags |= NET80211_CBW_FLAG_VHT80;
3264 if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
3265 cflags |= NET80211_CBW_FLAG_VHT160;
3266
3267 /* XXX-BZ nflags RADAR/DFS/INDOOR */
3268
3269 /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3270 nvm_channels[ch_idx],
3271 ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
3272 /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3273 nflags, bands, cflags);
3274 }
3275 band_6:
3276 /* 6GHz one day ... */
3277 return;
3278 }
3279
3280 static int
iwx_mimo_enabled(struct iwx_softc * sc)3281 iwx_mimo_enabled(struct iwx_softc *sc)
3282 {
3283
3284 return !sc->sc_nvm.sku_cap_mimo_disable;
3285 }
3286
3287 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3288 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3289 uint16_t ssn, uint16_t buf_size)
3290 {
3291 reorder_buf->head_sn = ssn;
3292 reorder_buf->num_stored = 0;
3293 reorder_buf->buf_size = buf_size;
3294 reorder_buf->last_amsdu = 0;
3295 reorder_buf->last_sub_index = 0;
3296 reorder_buf->removed = 0;
3297 reorder_buf->valid = 0;
3298 reorder_buf->consec_oldsn_drops = 0;
3299 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3300 reorder_buf->consec_oldsn_prev_drop = 0;
3301 }
3302
3303 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3304 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3305 {
3306 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3307
3308 reorder_buf->removed = 1;
3309 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3310 }
3311
3312 #define IWX_MAX_RX_BA_SESSIONS 16
3313
3314 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3315 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3316 {
3317 int i;
3318
3319 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3320 if (sc->sc_rxba_data[i].baid ==
3321 IWX_RX_REORDER_DATA_INVALID_BAID)
3322 continue;
3323 if (sc->sc_rxba_data[i].tid == tid)
3324 return &sc->sc_rxba_data[i];
3325 }
3326
3327 return NULL;
3328 }
3329
/*
 * Send an RX_BAID_ALLOCATION_CONFIG_CMD to add (start != 0) or remove
 * a receive block-ack session for 'tid'.
 *
 * On a successful add the firmware-assigned BAID is returned via
 * '*baid' (range-checked against sc_rxba_data).  On remove, '*baid'
 * is set to the BAID of the session being torn down, looked up by
 * TID.  'ni' and 'timeout_val' are currently unused here.
 *
 * Returns 0 on success, ENOENT if no session exists for removal,
 * ERANGE for an out-of-range BAID, or an errno from the command.
 */
static int
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* Command v1 removes by BAID; later versions by sta/tid. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* Reject BAIDs we have no bookkeeping slot for. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3380
/*
 * Start (start != 0) or stop a receive block-ack session for 'tid'.
 *
 * Only firmware advertising IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT is
 * handled; anything else panics.  On start, the firmware-assigned
 * BAID slot in sc_rxba_data is initialized; the hardware performs
 * the actual de-aggregation/reordering.  sc_rx_ba_sessions counts
 * active sessions, bounded by IWX_MAX_RX_BA_SESSIONS.
 */
static void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	/* Refuse to start more sessions than the hardware supports. */
	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		/* NOTE(review): slot unexpectedly in use; the new session
		 * is abandoned and never counted -- confirm intended. */
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		/* NOTE(review): returning here skips the session-count
		 * increment below -- confirm intended. */
		if (timeout_val != 0) {
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	/* Bookkeeping for the active-session count. */
	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3431
3432 /**
3433 * @brief Allocate an A-MPDU / aggregation session for the given node and TID.
3434 *
3435 * This allocates a TX queue specifically for that TID.
3436 *
3437 * Note that this routine currently doesn't return any status/errors,
3438 * so the caller can't know if the aggregation session was setup or not.
3439 */
3440 static void
iwx_sta_tx_agg_start(struct iwx_softc * sc,struct ieee80211_node * ni,uint8_t tid)3441 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3442 uint8_t tid)
3443 {
3444 int err, qid;
3445
3446 qid = sc->aggqid[tid];
3447 if (qid == 0) {
3448 /* Firmware should pick the next unused Tx queue. */
3449 qid = fls(sc->qenablemsk);
3450 }
3451
3452 DPRINTF(("%s: qid=%i\n", __func__, qid));
3453
3454 /*
3455 * Simply enable the queue.
3456 * Firmware handles Tx Ba session setup and teardown.
3457 */
3458 if ((sc->qenablemsk & (1 << qid)) == 0) {
3459 if (!iwx_nic_lock(sc)) {
3460 return;
3461 }
3462 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3463 IWX_TX_RING_COUNT);
3464 iwx_nic_unlock(sc);
3465 if (err) {
3466 printf("%s: could not enable Tx queue %d "
3467 "(error %d)\n", DEVNAME(sc), qid, err);
3468 return;
3469 }
3470 }
3471 ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
3472 DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
3473 sc->aggqid[tid] = qid;
3474 }
3475
/*
 * Deferred task: start/stop receive block-ack sessions for every TID
 * flagged in sc->ba_rx.start_tidmask / stop_tidmask.  Runs under the
 * softc lock and bails out early on shutdown.
 */
static void
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			/*
			 * NOTE(review): 'break' aborts the scan of all
			 * remaining TIDs, not just this one -- confirm
			 * that 'continue' wasn't intended.
			 */
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3512
3513 /**
3514 * @brief Task called to setup a deferred block-ack session.
3515 *
3516 * This sets up any/all pending blockack sessions as defined
3517 * in sc->ba_tx.start_tidmask.
3518 *
3519 * Note: the call to iwx_sta_tx_agg_start() isn't being error checked.
3520 */
3521 static void
iwx_ba_tx_task(void * arg,int npending __unused)3522 iwx_ba_tx_task(void *arg, int npending __unused)
3523 {
3524 struct iwx_softc *sc = arg;
3525 struct ieee80211com *ic = &sc->sc_ic;
3526 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3527 struct ieee80211_node *ni = vap->iv_bss;
3528 uint32_t started_mask = 0;
3529 int tid;
3530
3531 IWX_LOCK(sc);
3532 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3533 const struct ieee80211_tx_ampdu *tap;
3534
3535 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3536 break;
3537 tap = &ni->ni_tx_ampdu[tid];
3538 if (IEEE80211_AMPDU_RUNNING(tap))
3539 break;
3540 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3541 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
3542 "%s: ampdu tx start for tid %i\n", __func__, tid);
3543 iwx_sta_tx_agg_start(sc, ni, tid);
3544 sc->ba_tx.start_tidmask &= ~(1 << tid);
3545 started_mask |= (1 << tid);
3546 }
3547 }
3548
3549 IWX_UNLOCK(sc);
3550
3551 /* Iterate over the sessions we started; mark them as active */
3552 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3553 if (started_mask & (1 << tid)) {
3554 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
3555 "%s: informing net80211 to start ampdu on tid %i\n",
3556 __func__, tid);
3557 ieee80211_ampdu_tx_request_active_ext(ni, tid, 1);
3558 }
3559 }
3560 }
3561
/*
 * Read the MAC address from device CSR space into data->hw_addr.
 * OEM-fused "strap" values take precedence over the OTP copy; both
 * are two 32-bit words byte-swapped into canonical order by
 * iwx_flip_hw_address().  hw_addr is left zeroed if the NIC cannot
 * be locked.
 */
static void
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	uint32_t mac_addr0, mac_addr1;

	memset(data->hw_addr, 0, sizeof(data->hw_addr));

	if (!iwx_nic_lock(sc))
		return;

	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	/* If OEM fused a valid address, use it instead of the one in OTP. */
	if (iwx_is_valid_mac_addr(data->hw_addr)) {
		iwx_nic_unlock(sc);
		return;
	}

	/* Fall back to the OTP-programmed address. */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	iwx_nic_unlock(sc);
}
3590
3591 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3592 iwx_is_valid_mac_addr(const uint8_t *addr)
3593 {
3594 static const uint8_t reserved_mac[] = {
3595 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3596 };
3597
3598 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3599 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3600 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3601 !ETHER_IS_MULTICAST(addr));
3602 }
3603
/*
 * Reassemble a 6-byte MAC address from the two 32-bit CSR words.
 * The words store the bytes in reversed order; the first word
 * contributes dest[0..3] and the low half of the second word
 * contributes dest[4..5].
 */
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	const uint8_t *b0 = (const uint8_t *)&mac_addr0;
	const uint8_t *b1 = (const uint8_t *)&mac_addr1;
	int i;

	for (i = 0; i < 4; i++)
		dest[i] = b0[3 - i];

	dest[4] = b1[1];
	dest[5] = b1[0];
}
3619
3620 static int
iwx_nvm_get(struct iwx_softc * sc)3621 iwx_nvm_get(struct iwx_softc *sc)
3622 {
3623 struct iwx_nvm_get_info cmd = {};
3624 struct iwx_nvm_data *nvm = &sc->sc_nvm;
3625 struct iwx_host_cmd hcmd = {
3626 .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3627 .data = { &cmd, },
3628 .len = { sizeof(cmd) },
3629 .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3630 IWX_NVM_GET_INFO)
3631 };
3632 int err = 0;
3633 uint32_t mac_flags;
3634 /*
3635 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3636 * in v3, except for the channel profile part of the
3637 * regulatory. So we can just access the new struct, with the
3638 * exception of the latter.
3639 */
3640 struct iwx_nvm_get_info_rsp *rsp;
3641 struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3642 int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3643 size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3644
3645 hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3646 err = iwx_send_cmd(sc, &hcmd);
3647 if (err) {
3648 printf("%s: failed to send cmd (error %d)", __func__, err);
3649 return err;
3650 }
3651
3652 if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3653 printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
3654 iwx_rx_packet_payload_len(hcmd.resp_pkt));
3655 printf("%s: resp_len=%zu\n", __func__, resp_len);
3656 err = EIO;
3657 goto out;
3658 }
3659
3660 memset(nvm, 0, sizeof(*nvm));
3661
3662 iwx_set_mac_addr_from_csr(sc, nvm);
3663 if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3664 printf("%s: no valid mac address was found\n", DEVNAME(sc));
3665 err = EINVAL;
3666 goto out;
3667 }
3668
3669 rsp = (void *)hcmd.resp_pkt->data;
3670
3671 /* Initialize general data */
3672 nvm->nvm_version = le16toh(rsp->general.nvm_version);
3673 nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3674
3675 /* Initialize MAC sku data */
3676 mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3677 nvm->sku_cap_11ac_enable =
3678 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3679 nvm->sku_cap_11n_enable =
3680 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3681 nvm->sku_cap_11ax_enable =
3682 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3683 nvm->sku_cap_band_24GHz_enable =
3684 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3685 nvm->sku_cap_band_52GHz_enable =
3686 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3687 nvm->sku_cap_mimo_disable =
3688 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3689
3690 /* Initialize PHY sku data */
3691 nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3692 nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3693
3694 if (le32toh(rsp->regulatory.lar_enabled) &&
3695 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3696 nvm->lar_enabled = 1;
3697 }
3698
3699 memcpy(&sc->sc_rsp_info, rsp, resp_len);
3700 if (v4) {
3701 sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
3702 } else {
3703 sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
3704 }
3705 out:
3706 iwx_free_resp(sc, &hcmd);
3707 return err;
3708 }
3709
/*
 * Upload the regular firmware image via the context-info mechanism
 * (gen3 layout on AX210 and later) and wait up to one second for the
 * firmware to report in (sc_uc.uc_ok -- presumably set by the ALIVE
 * interrupt path; confirm).  The IML and firmware-image DMA buffers
 * are freed afterwards in either case.
 * Returns 0 on success, EINVAL if the firmware never came up, or an
 * errno from context-info setup or msleep().
 */
static int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	/* NOTE(review): missing ';' -- relies on the macro providing it. */
	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The IML and firmware image are no longer needed after load. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3746
/*
 * Start the firmware: acknowledge any pending interrupts, clear the
 * rfkill handshake bits, run one-time NIC init and hand off to
 * iwx_load_firmware().  Returns 0 or an errno value.
 */
static int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Ack all pending interrupts before reconfiguring. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3774
3775 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3776 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3777 size_t len)
3778 {
3779 const struct iwx_ucode_tlv *tlv;
3780 uint32_t sha1 = 0;
3781 uint16_t mac_type = 0, rf_id = 0;
3782 uint8_t *pnvm_data = NULL, *tmp;
3783 int hw_match = 0;
3784 uint32_t size = 0;
3785 int err;
3786
3787 while (len >= sizeof(*tlv)) {
3788 uint32_t tlv_len, tlv_type;
3789
3790 len -= sizeof(*tlv);
3791 tlv = (const void *)data;
3792
3793 tlv_len = le32toh(tlv->length);
3794 tlv_type = le32toh(tlv->type);
3795
3796 if (len < tlv_len) {
3797 printf("%s: invalid TLV len: %zd/%u\n",
3798 DEVNAME(sc), len, tlv_len);
3799 err = EINVAL;
3800 goto out;
3801 }
3802
3803 data += sizeof(*tlv);
3804
3805 switch (tlv_type) {
3806 case IWX_UCODE_TLV_PNVM_VERSION:
3807 if (tlv_len < sizeof(uint32_t))
3808 break;
3809
3810 sha1 = le32_to_cpup((const uint32_t *)data);
3811 break;
3812 case IWX_UCODE_TLV_HW_TYPE:
3813 if (tlv_len < 2 * sizeof(uint16_t))
3814 break;
3815
3816 if (hw_match)
3817 break;
3818
3819 mac_type = le16_to_cpup((const uint16_t *)data);
3820 rf_id = le16_to_cpup((const uint16_t *)(data +
3821 sizeof(uint16_t)));
3822
3823 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3824 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3825 hw_match = 1;
3826 break;
3827 case IWX_UCODE_TLV_SEC_RT: {
3828 const struct iwx_pnvm_section *section;
3829 uint32_t data_len;
3830
3831 section = (const void *)data;
3832 data_len = tlv_len - sizeof(*section);
3833
3834 /* TODO: remove, this is a deprecated separator */
3835 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3836 break;
3837
3838 tmp = malloc(size + data_len, M_DEVBUF,
3839 M_WAITOK | M_ZERO);
3840 if (tmp == NULL) {
3841 err = ENOMEM;
3842 goto out;
3843 }
3844 // XXX:misha pnvm_data is NULL and size is 0 at first pass
3845 memcpy(tmp, pnvm_data, size);
3846 memcpy(tmp + size, section->data, data_len);
3847 free(pnvm_data, M_DEVBUF);
3848 pnvm_data = tmp;
3849 size += data_len;
3850 break;
3851 }
3852 case IWX_UCODE_TLV_PNVM_SKU:
3853 /* New PNVM section started, stop parsing. */
3854 goto done;
3855 default:
3856 break;
3857 }
3858
3859 if (roundup(tlv_len, 4) > len)
3860 break;
3861 len -= roundup(tlv_len, 4);
3862 data += roundup(tlv_len, 4);
3863 }
3864 done:
3865 if (!hw_match || size == 0) {
3866 err = ENOENT;
3867 goto out;
3868 }
3869
3870 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3871 if (err) {
3872 printf("%s: could not allocate DMA memory for PNVM\n",
3873 DEVNAME(sc));
3874 err = ENOMEM;
3875 goto out;
3876 }
3877 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3878 iwx_ctxt_info_gen3_set_pnvm(sc);
3879 sc->sc_pnvm_ver = sha1;
3880 out:
3881 free(pnvm_data, M_DEVBUF);
3882 return err;
3883 }
3884
/*
 * Walk the top-level TLVs of a PNVM image looking for a
 * IWX_UCODE_TLV_PNVM_SKU section whose SKU ID matches this device,
 * then hand the remainder of the image to iwx_pnvm_handle_section().
 *
 * Returns 0 once a matching section has been applied, EINVAL on a
 * malformed TLV, or ENOENT if no section matches.
 */
static int
iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
{
	const struct iwx_ucode_tlv *tlv;

	while (len >= sizeof(*tlv)) {
		uint32_t tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);

		/* TLV payloads are padded to 4 bytes; both bounds must hold. */
		if (len < tlv_len || roundup(tlv_len, 4) > len)
			return EINVAL;

		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
			const struct iwx_sku_id *sku_id =
			    (const void *)(data + sizeof(*tlv));

			/*
			 * Advance past this TLV first; on a SKU match the
			 * rest of the image (data/len) is the section body.
			 */
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);

			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
			    iwx_pnvm_handle_section(sc, data, len) == 0)
				return 0;
		} else {
			data += sizeof(*tlv) + roundup(tlv_len, 4);
			len -= roundup(tlv_len, 4);
		}
	}

	return ENOENT;
}
3922
3923 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3924 static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc * sc)3925 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3926 {
3927 struct iwx_prph_scratch *prph_scratch;
3928 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3929
3930 prph_scratch = sc->prph_scratch_dma.vaddr;
3931 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3932
3933 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3934 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3935
3936 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3937 }
3938
3939 /*
3940 * Load platform-NVM (non-volatile-memory) data from the filesystem.
3941 * This data apparently contains regulatory information and affects device
3942 * channel configuration.
3943 * The SKU of AX210 devices tells us which PNVM file section is needed.
3944 * Pre-AX210 devices store NVM data onboard.
3945 */
3946 static int
iwx_load_pnvm(struct iwx_softc * sc)3947 iwx_load_pnvm(struct iwx_softc *sc)
3948 {
3949 const int wait_flags = IWX_PNVM_COMPLETE;
3950 int err = 0;
3951 const struct firmware *pnvm;
3952
3953 if (sc->sc_sku_id[0] == 0 &&
3954 sc->sc_sku_id[1] == 0 &&
3955 sc->sc_sku_id[2] == 0)
3956 return 0;
3957
3958 if (sc->sc_pnvm_name) {
3959 if (sc->pnvm_dma.vaddr == NULL) {
3960 IWX_UNLOCK(sc);
3961 pnvm = firmware_get(sc->sc_pnvm_name);
3962 if (pnvm == NULL) {
3963 printf("%s: could not read %s (error %d)\n",
3964 DEVNAME(sc), sc->sc_pnvm_name, err);
3965 IWX_LOCK(sc);
3966 return EINVAL;
3967 }
3968 sc->sc_pnvm = pnvm;
3969
3970 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3971 IWX_LOCK(sc);
3972 if (err && err != ENOENT) {
3973 return EINVAL;
3974 }
3975 } else
3976 iwx_ctxt_info_gen3_set_pnvm(sc);
3977 }
3978
3979 if (!iwx_nic_lock(sc)) {
3980 return EBUSY;
3981 }
3982
3983 /*
3984 * If we don't have a platform NVM file simply ask firmware
3985 * to proceed without it.
3986 */
3987
3988 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3989 IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3990
3991 /* Wait for the pnvm complete notification from firmware. */
3992 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3993 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3994 if (err)
3995 break;
3996 }
3997
3998 iwx_nic_unlock(sc);
3999
4000 return err;
4001 }
4002
4003 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)4004 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4005 {
4006 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4007 .valid = htole32(valid_tx_ant),
4008 };
4009
4010 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4011 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4012 }
4013
4014 static int
iwx_send_phy_cfg_cmd(struct iwx_softc * sc)4015 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4016 {
4017 struct iwx_phy_cfg_cmd phy_cfg_cmd;
4018
4019 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4020 phy_cfg_cmd.calib_control.event_trigger =
4021 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4022 phy_cfg_cmd.calib_control.flow_trigger =
4023 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4024
4025 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4026 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4027 }
4028
4029 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)4030 iwx_send_dqa_cmd(struct iwx_softc *sc)
4031 {
4032 struct iwx_dqa_enable_cmd dqa_cmd = {
4033 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4034 };
4035 uint32_t cmd_id;
4036
4037 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4038 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4039 }
4040
/*
 * Read the firmware image from the filesystem, start it on the device,
 * load PNVM data where required (AX210+), and run post-alive setup.
 * Called with the softc lock held; the lock is dropped around the
 * sleeping firmware read.  Returns 0 on success or an errno.
 */
static int
iwx_load_ucode_wait_alive(struct iwx_softc *sc)
{
	int err;

	/* iwx_read_firmware() may sleep on firmware(9); drop the lock. */
	IWX_UNLOCK(sc);
	err = iwx_read_firmware(sc);
	IWX_LOCK(sc);
	if (err)
		return err;

	err = iwx_start_fw(sc);
	if (err)
		return err;

	/* Only AX210 and later take PNVM data from a file. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_load_pnvm(sc);
		if (err)
			return err;
	}

	iwx_post_alive(sc);

	return 0;
}
4066
/*
 * Boot the firmware into its "init" state: load the ucode, send the
 * extended-config and NVM-access-complete commands, and wait for the
 * init-complete notification.  If 'readnvm' is set, also read the NVM
 * and copy the hardware MAC address into the ic.
 * Returns 0 on success or an errno.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* Refuse to start while rfkill is asserted, unless just reading NVM. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4139
4140 static int
iwx_config_ltr(struct iwx_softc * sc)4141 iwx_config_ltr(struct iwx_softc *sc)
4142 {
4143 struct iwx_ltr_config_cmd cmd = {
4144 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4145 };
4146
4147 if (!sc->sc_ltr_enabled)
4148 return 0;
4149
4150 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4151 }
4152
4153 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4154 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4155 bus_dma_segment_t *seg)
4156 {
4157 struct iwx_rx_data *data = &ring->data[idx];
4158
4159 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4160 struct iwx_rx_transfer_desc *desc = ring->desc;
4161 desc[idx].rbid = htole16(idx & 0xffff);
4162 desc[idx].addr = htole64((*seg).ds_addr);
4163 bus_dmamap_sync(ring->data_dmat, data->map,
4164 BUS_DMASYNC_PREWRITE);
4165 } else {
4166 ((uint64_t *)ring->desc)[idx] =
4167 htole64((*seg).ds_addr);
4168 bus_dmamap_sync(ring->data_dmat, data->map,
4169 BUS_DMASYNC_PREWRITE);
4170 }
4171 }
4172
/*
 * Allocate an mbuf cluster for RX ring slot 'idx', DMA-map it, and
 * publish its address in the RX descriptor.  Returns 0 on success,
 * ENOBUFS if no mbuf was available, or a bus_dma error.
 */
static int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;
	bus_dma_segment_t seg;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	/*
	 * Replacing an already-mapped buffer: unload the old mapping.
	 * If the subsequent load fails we cannot restore the slot, hence
	 * 'fatal' below.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(ring->data_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx, &seg);
	return 0;
}
4210
4211 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4212 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4213 struct iwx_rx_mpdu_desc *desc)
4214 {
4215 int energy_a, energy_b;
4216
4217 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4218 energy_a = desc->v3.energy_a;
4219 energy_b = desc->v3.energy_b;
4220 } else {
4221 energy_a = desc->v1.energy_a;
4222 energy_b = desc->v1.energy_b;
4223 }
4224 energy_a = energy_a ? -energy_a : -256;
4225 energy_b = energy_b ? -energy_b : -256;
4226 return MAX(energy_a, energy_b);
4227 }
4228
4229 static int
iwx_rxmq_get_chains(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4230 iwx_rxmq_get_chains(struct iwx_softc *sc,
4231 struct iwx_rx_mpdu_desc *desc)
4232 {
4233
4234 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4235 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4236 IWX_RATE_MCS_ANT_POS);
4237 else
4238 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4239 IWX_RATE_MCS_ANT_POS);
4240 }
4241
/*
 * Handle an RX PHY info notification: cache the PHY info for use when
 * the corresponding frame arrives.
 *
 * NOTE(review): the qid from the command header is used to index the
 * TX queue array and the sync direction is PREREAD on data we are about
 * to read — both look questionable for an RX notification; confirm
 * against the interrupt-path caller before changing.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	struct iwx_tx_ring *ring = &sc->txq[qid];

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4254
4255 /*
4256 * Retrieve the average noise (in dBm) among receivers.
4257 */
4258 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4259 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4260 {
4261 int i, total, nbant, noise;
4262
4263 total = nbant = noise = 0;
4264 for (i = 0; i < 3; i++) {
4265 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4266 if (noise) {
4267 total += noise;
4268 nbant++;
4269 }
4270 }
4271
4272 /* There should be at least one antenna but check anyway. */
4273 return (nbant == 0) ? -127 : (total / nbant) - 107;
4274 }
4275
#if 0
/*
 * CCMP replay detection for hardware-decrypted frames, retained from
 * the OpenBSD driver.  Disabled until FreeBSD net80211 hw-decrypt
 * integration is done (see the "XXX hw decrypt" caller below).
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0] |
	    (uint64_t)ivp[1] << 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4335
4336 static int
iwx_rx_hwdecrypt(struct iwx_softc * sc,struct mbuf * m,uint32_t rx_pkt_status)4337 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4338 {
4339 struct ieee80211_frame *wh;
4340 int ret = 0;
4341 uint8_t type, subtype;
4342
4343 wh = mtod(m, struct ieee80211_frame *);
4344
4345 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4346 if (type == IEEE80211_FC0_TYPE_CTL) {
4347 return 0;
4348 }
4349
4350 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4351 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4352 return 0;
4353 }
4354
4355
4356 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4357 IEEE80211_FC0_TYPE_CTL)
4358 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4359 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4360 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4361 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4362 ret = 1;
4363 goto out;
4364 }
4365 /* Check whether decryption was successful or not. */
4366 if ((rx_pkt_status &
4367 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4368 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4369 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4370 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4371 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4372 ret = 1;
4373 goto out;
4374 }
4375 }
4376 out:
4377 return ret;
4378 }
4379
/*
 * Deliver a received frame to net80211: map the hardware channel index
 * to an ic_channels entry, fill in radiotap if active, and hand the
 * mbuf to ieee80211_input_mimo{,_all}().  The softc lock is dropped
 * around the net80211 input call.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Translate the IEEE channel number into an ic_channels index. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0	/* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	/* Fill the radiotap header for sniffers, if any are attached. */
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/* The rate_n_flags encoding differs between API versions. */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map the hardware rate code to 500 kb/s units. */
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate = 12; break;
			case 0xf: tap->wr_rate = 18; break;
			case 0x5: tap->wr_rate = 24; break;
			case 0x7: tap->wr_rate = 36; break;
			case 0x9: tap->wr_rate = 48; break;
			case 0xb: tap->wr_rate = 72; break;
			case 0x1: tap->wr_rate = 96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate = 0;
			}
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may recurse into our output path; drop the lock. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4515
4516 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4517 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4518 size_t maxlen)
4519 {
4520 struct ieee80211com *ic = &sc->sc_ic;
4521 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4522 struct ieee80211_node *ni = vap->iv_bss;
4523 struct ieee80211_key *k;
4524 struct ieee80211_rx_stats rxs;
4525 struct iwx_rx_mpdu_desc *desc;
4526 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4527 int rssi;
4528 uint8_t chanidx;
4529 uint16_t phy_info;
4530 size_t desc_size;
4531 int pad = 0;
4532
4533 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4534 desc_size = sizeof(*desc);
4535 else
4536 desc_size = IWX_RX_DESC_SIZE_V1;
4537
4538 if (maxlen < desc_size) {
4539 m_freem(m);
4540 return; /* drop */
4541 }
4542
4543 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4544
4545 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4546 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4547 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4548 m_freem(m);
4549 return; /* drop */
4550 }
4551
4552 len = le16toh(desc->mpdu_len);
4553 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4554 /* Allow control frames in monitor mode. */
4555 if (len < sizeof(struct ieee80211_frame_cts)) {
4556 m_freem(m);
4557 return;
4558 }
4559
4560 } else if (len < sizeof(struct ieee80211_frame)) {
4561 m_freem(m);
4562 return;
4563 }
4564 if (len > maxlen - desc_size) {
4565 m_freem(m);
4566 return;
4567 }
4568
4569 // TODO: arithmetic on a pointer to void is a GNU extension
4570 m->m_data = (char *)pktdata + desc_size;
4571 m->m_pkthdr.len = m->m_len = len;
4572
4573 /* Account for padding following the frame header. */
4574 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4575 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4576 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4577 if (type == IEEE80211_FC0_TYPE_CTL) {
4578 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4579 case IEEE80211_FC0_SUBTYPE_CTS:
4580 hdrlen = sizeof(struct ieee80211_frame_cts);
4581 break;
4582 case IEEE80211_FC0_SUBTYPE_ACK:
4583 hdrlen = sizeof(struct ieee80211_frame_ack);
4584 break;
4585 default:
4586 hdrlen = sizeof(struct ieee80211_frame_min);
4587 break;
4588 }
4589 } else
4590 hdrlen = ieee80211_hdrsize(wh);
4591
4592 if ((le16toh(desc->status) &
4593 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4594 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4595 // CCMP header length
4596 hdrlen += 8;
4597 }
4598
4599 memmove(m->m_data + 2, m->m_data, hdrlen);
4600 m_adj(m, 2);
4601
4602 }
4603
4604 if ((le16toh(desc->status) &
4605 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4606 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4607 pad = 1;
4608 }
4609
4610 /* If it's a HT node then perform re-order processing */
4611 if (ni->ni_flags & IEEE80211_NODE_HT)
4612 m->m_flags |= M_AMPDU;
4613
4614 /*
4615 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4616 * in place for each subframe. But it leaves the 'A-MSDU present'
4617 * bit set in the frame header. We need to clear this bit ourselves.
4618 * (XXX This workaround is not required on AX200/AX201 devices that
4619 * have been tested by me, but it's unclear when this problem was
4620 * fixed in the hardware. It definitely affects the 9k generation.
4621 * Leaving this in place for now since some 9k/AX200 hybrids seem
4622 * to exist that we may eventually add support for.)
4623 *
4624 * And we must allow the same CCMP PN for subframes following the
4625 * first subframe. Otherwise they would be discarded as replays.
4626 */
4627 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4628 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4629 uint8_t subframe_idx = (desc->amsdu_info &
4630 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4631 uint8_t *qos;
4632
4633 rxs.c_pktflags |= IEEE80211_RX_F_AMSDU;
4634 if (subframe_idx > 0)
4635 rxs.c_pktflags |= IEEE80211_RX_F_AMSDU_MORE;
4636
4637 /* XXX should keep driver statistics about this */
4638 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
4639 "%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__);
4640
4641 qos = ieee80211_getqos(wh);
4642 qos[0] &= ~IEEE80211_QOS_AMSDU;
4643 }
4644
4645 /*
4646 * Verify decryption before duplicate detection. The latter uses
4647 * the TID supplied in QoS frame headers and this TID is implicitly
4648 * verified as part of the CCMP nonce.
4649 */
4650 k = ieee80211_crypto_get_txkey(ni, m);
4651 if (k != NULL &&
4652 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4653 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4654 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4655 m_freem(m);
4656 return;
4657 }
4658
4659 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4660 rate_n_flags = le32toh(desc->v3.rate_n_flags);
4661 chanidx = desc->v3.channel;
4662 device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4663 } else {
4664 rate_n_flags = le32toh(desc->v1.rate_n_flags);
4665 chanidx = desc->v1.channel;
4666 device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4667 }
4668
4669 phy_info = le16toh(desc->phy_info);
4670
4671 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4672 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4673 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
4674
4675 memset(&rxs, 0, sizeof(rxs));
4676 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4677 rxs.r_flags |= IEEE80211_R_BAND;
4678 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4679 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4680
4681 rxs.c_ieee = chanidx;
4682 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4683 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4684 rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4685 rxs.c_rx_tsf = device_timestamp;
4686 rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
4687 if (rxs.c_chain != 0)
4688 rxs.r_flags |= IEEE80211_R_C_CHAIN;
4689
4690 /* rssi is in 1/2db units */
4691 rxs.c_rssi = rssi * 2;
4692 rxs.c_nf = sc->sc_noise;
4693
4694 if (pad) {
4695 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4696 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4697 }
4698
4699 if (ieee80211_add_rx_params(m, &rxs) == 0) {
4700 printf("%s: ieee80211_add_rx_params failed\n", __func__);
4701 return;
4702 }
4703
4704 ieee80211_add_rx_params(m, &rxs);
4705
4706 #if 0
4707 if (iwx_rx_reorder(sc, m, chanidx, desc,
4708 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4709 rate_n_flags, device_timestamp, &rxi, ml))
4710 return;
4711 #endif
4712
4713 if (pad) {
4714 #define TRIM 8
4715 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4716 hdrlen = ieee80211_hdrsize(wh);
4717 memmove(m->m_data + TRIM, m->m_data, hdrlen);
4718 m_adj(m, TRIM);
4719 #undef TRIM
4720 }
4721
4722 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4723 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4724 rate_n_flags, device_timestamp, rssi);
4725 }
4726
4727 static void
iwx_clear_tx_desc(struct iwx_softc * sc,struct iwx_tx_ring * ring,int idx)4728 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4729 {
4730 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4731 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4732 int i;
4733
4734 /* First TB is never cleared - it is bidirectional DMA data. */
4735 for (i = 1; i < num_tbs; i++) {
4736 struct iwx_tfh_tb *tb = &desc->tbs[i];
4737 memset(tb, 0, sizeof(*tb));
4738 }
4739 desc->num_tbs = htole16(1);
4740
4741 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4742 BUS_DMASYNC_PREWRITE);
4743 }
4744
/*
 * Finish a completed TX: unmap the mbuf and hand it back to net80211
 * (ieee80211_tx_complete frees the mbuf and drops the node reference).
 */
static void
iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
    struct iwx_tx_data *txd)
{
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
	txd->m = NULL;
	txd->in = NULL;
}
4756
/*
 * Reap completed frames from the ring tail up to (but not including)
 * hardware index 'idx', advancing both the software and hardware tail
 * counters.
 */
static void
iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
{
	struct iwx_tx_data *txd;

	while (ring->tail_hw != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL) {
			iwx_clear_tx_desc(sc, ring, ring->tail);
			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
			iwx_txd_done(sc, ring, txd);
			ring->queued--;
			/* 'queued' going negative means reaping a free slot. */
			if (ring->queued < 0)
				panic("caught negative queue count");
		}
		/* SW tail wraps at ring size, HW tail at the TFD queue size. */
		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
	}
}
4776
/*
 * Handle a TX response notification from the firmware: update interface
 * counters for the completed frame and reap the ring up to the reported
 * SSN (which on this hardware is a ring index, not a sequence number).
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	/* Non-aggregation queues must report exactly one frame. */
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Agg queues: per-frame status array plus the trailing SSN must fit. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* Any response means the queue is alive; disarm the watchdog. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4838
4839 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4840 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4841 {
4842 IWX_ASSERT_LOCKED(sc);
4843
4844 if (ring->queued < iwx_lomark) {
4845 sc->qfullmsk &= ~(1 << ring->qid);
4846 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4847 /*
4848 * Well, we're in interrupt context, but then again
4849 * I guess net80211 does all sorts of stunts in
4850 * interrupt context, so maybe this is no biggie.
4851 */
4852 iwx_start(sc);
4853 }
4854 }
4855 }
4856
/*
 * Handle a compressed block-ack (BA) notification.  Firmware reports,
 * per aggregation queue, the Tx ring index up to which frames have been
 * acknowledged, so those slots can be reclaimed.
 */
static void
iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
{
	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_tx_ring *ring;
	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
	int qid;

//	if (ic->ic_state != IEEE80211_S_RUN)
//		return;

	/* Discard truncated notifications. */
	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
		return;

	/* Only our single station context is of interest. */
	if (ba_res->sta_id != IWX_STATION_ID)
		return;

	/* NOTE(review): redundant — 'in' was already derived from
	 * vap->iv_bss above and ni points into the same node. */
	in = (void *)ni;

	/* Validate that both variable-length arrays fit in the payload. */
	tfd_cnt = le16toh(ba_res->tfd_cnt);
	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
	    sizeof(ba_res->tfd[0]) * tfd_cnt))
		return;

	for (i = 0; i < tfd_cnt; i++) {
		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
		uint8_t tid;

		tid = ba_tfd->tid;
		if (tid >= nitems(sc->aggqid))
			continue;

		/*
		 * NOTE(review): htole16() of a little-endian field performs
		 * the same byte swap as le16toh(), so this comparison is
		 * numerically correct, but le16toh(ba_tfd->q_num) would
		 * express the intent more clearly.
		 */
		qid = sc->aggqid[tid];
		if (qid != htole16(ba_tfd->q_num))
			continue;

		ring = &sc->txq[qid];

#if 0
		ba = &ni->ni_tx_ba[tid];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
#endif
		/* Reclaim all descriptors up to the acknowledged index and
		 * disarm this queue's Tx watchdog. */
		idx = le16toh(ba_tfd->tfd_index);
		sc->sc_tx_timer[qid] = 0;
		iwx_txq_advance(sc, ring, idx);
		iwx_clear_oactive(sc, ring);
	}
}
4912
4913 static void
iwx_rx_bmiss(struct iwx_softc * sc,struct iwx_rx_packet * pkt,struct iwx_rx_data * data)4914 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4915 struct iwx_rx_data *data)
4916 {
4917 struct ieee80211com *ic = &sc->sc_ic;
4918 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4919 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4920 uint32_t missed;
4921
4922 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4923 (vap->iv_state != IEEE80211_S_RUN))
4924 return;
4925
4926 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4927 BUS_DMASYNC_POSTREAD);
4928
4929 IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
4930 "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n",
4931 __func__,
4932 le32toh(mbn->mac_id),
4933 le32toh(mbn->consec_missed_beacons_since_last_rx),
4934 le32toh(mbn->consec_missed_beacons),
4935 le32toh(mbn->num_expected_beacons),
4936 le32toh(mbn->num_recvd_beacons));
4937
4938 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4939 if (missed > vap->iv_bmissthreshold) {
4940 ieee80211_beacon_miss(ic);
4941 }
4942 }
4943
4944 static int
iwx_binding_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action)4945 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4946 {
4947 struct iwx_binding_cmd cmd;
4948 struct ieee80211com *ic = &sc->sc_ic;
4949 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4950 struct iwx_vap *ivp = IWX_VAP(vap);
4951 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
4952 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4953 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4954 uint32_t status;
4955
4956 if (action == IWX_FW_CTXT_ACTION_ADD && active)
4957 panic("binding already added");
4958 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4959 panic("binding already removed");
4960
4961 if (phyctxt == NULL) /* XXX race with iwx_stop() */
4962 return EINVAL;
4963
4964 memset(&cmd, 0, sizeof(cmd));
4965
4966 cmd.id_and_color
4967 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4968 cmd.action = htole32(action);
4969 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4970
4971 cmd.macs[0] = htole32(mac_id);
4972 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4973 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4974
4975 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4976 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4977 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4978 else
4979 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4980
4981 status = 0;
4982 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4983 &cmd, &status);
4984 if (err == 0 && status != 0)
4985 err = EIO;
4986
4987 return err;
4988 }
4989
4990 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4991 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4992 {
4993 int ctlchan = ieee80211_chan2ieee(ic, chan);
4994 int midpoint = chan->ic_vht_ch_freq1;
4995
4996 /*
4997 * The FW is expected to check the control channel position only
4998 * when in HT/VHT and the channel width is not 20MHz. Return
4999 * this value as the default one:
5000 */
5001 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5002
5003 switch (ctlchan - midpoint) {
5004 case -6:
5005 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5006 break;
5007 case -2:
5008 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5009 break;
5010 case 2:
5011 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5012 break;
5013 case 6:
5014 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5015 break;
5016 default:
5017 break;
5018 }
5019
5020 return pos;
5021 }
5022
/*
 * Send an IWX_PHY_CONTEXT_CMD using the "ultra high band" (UHB) command
 * layout, which carries the larger fw_channel_info structure.  Fills in
 * band, channel number, bandwidth, control channel position and — for
 * cmdver < 4 when RLC_CONFIG_CMD v2 is absent — the Rx chain setup.
 *
 * NOTE(review): the 'sco' and 'vht_chan_width' parameters are not read
 * in this variant; width is derived from the channel flags instead.
 */
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* 2GHz channels (and non-CDB firmware) are handled by LMAC 0. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Channel width and control channel position from channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Older command versions (without RLC_CONFIG_CMD v2) embed the
	 * Rx chain configuration in the PHY context command itself.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5076
#if 0
/*
 * Non-UHB (regular-sized fw_channel_info) variant of the PHY context
 * command.  Currently compiled out: iwx_phy_ctxt_cmd() panics instead
 * of reaching this path, since supported devices are expected to set
 * IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS.  Kept for reference.
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5138
/*
 * Send a PHY context command to firmware, dispatching to the correct
 * command layout.  Only command versions 3 and 4 are supported, and
 * only the larger UHB layout is implemented; hardware that does not
 * advertise IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS panics below.
 */
static int
iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
{
	int cmdver;

	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
	if (cmdver != 3 && cmdver != 4) {
		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
		    DEVNAME(sc));
		return ENOTSUP;
	}

	/*
	 * Intel increased the size of the fw_channel_info struct and neglected
	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
	 * member in the middle.
	 * To keep things simple we use a separate function to handle the larger
	 * variant of the phy context command.
	 */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
		    chains_dynamic, action, sco, vht_chan_width, cmdver);
	} else
		panic("Unsupported old hardware contact thj@");

	/* Unreachable: non-UHB path retained for reference only. */
#if 0
	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
	    action, sco, vht_chan_width, cmdver);
#endif
}
5171
5172 static int
iwx_send_cmd(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5173 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5174 {
5175 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5176 struct iwx_tfh_tfd *desc;
5177 struct iwx_tx_data *txdata;
5178 struct iwx_device_cmd *cmd;
5179 struct mbuf *m;
5180 bus_addr_t paddr;
5181 uint64_t addr;
5182 int err = 0, i, paylen, off/*, s*/;
5183 int idx, code, async, group_id;
5184 size_t hdrlen, datasz;
5185 uint8_t *data;
5186 int generation = sc->sc_generation;
5187 bus_dma_segment_t seg[10];
5188 int nsegs;
5189
5190 code = hcmd->id;
5191 async = hcmd->flags & IWX_CMD_ASYNC;
5192 idx = ring->cur;
5193
5194 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5195 paylen += hcmd->len[i];
5196 }
5197
5198 /* If this command waits for a response, allocate response buffer. */
5199 hcmd->resp_pkt = NULL;
5200 if (hcmd->flags & IWX_CMD_WANT_RESP) {
5201 uint8_t *resp_buf;
5202 KASSERT(!async, ("async command want response"));
5203 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5204 ("wrong pkt len 1"));
5205 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5206 ("wrong pkt len 2"));
5207 if (sc->sc_cmd_resp_pkt[idx] != NULL)
5208 return ENOSPC;
5209 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5210 M_NOWAIT | M_ZERO);
5211 if (resp_buf == NULL)
5212 return ENOMEM;
5213 sc->sc_cmd_resp_pkt[idx] = resp_buf;
5214 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5215 } else {
5216 sc->sc_cmd_resp_pkt[idx] = NULL;
5217 }
5218
5219 desc = &ring->desc[idx];
5220 txdata = &ring->data[idx];
5221
5222 /*
5223 * XXX Intel inside (tm)
5224 * Firmware API versions >= 50 reject old-style commands in
5225 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5226 * that such commands were in the LONG_GROUP instead in order
5227 * for firmware to accept them.
5228 */
5229 if (iwx_cmd_groupid(code) == 0) {
5230 code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5231 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5232 } else
5233 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5234
5235 group_id = iwx_cmd_groupid(code);
5236
5237 hdrlen = sizeof(cmd->hdr_wide);
5238 datasz = sizeof(cmd->data_wide);
5239
5240 if (paylen > datasz) {
5241 /* Command is too large to fit in pre-allocated space. */
5242 size_t totlen = hdrlen + paylen;
5243 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5244 printf("%s: firmware command too long (%zd bytes)\n",
5245 DEVNAME(sc), totlen);
5246 err = EINVAL;
5247 goto out;
5248 }
5249 if (totlen > IWX_RBUF_SIZE)
5250 panic("totlen > IWX_RBUF_SIZE");
5251 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5252 if (m == NULL) {
5253 printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5254 DEVNAME(sc), IWX_RBUF_SIZE);
5255 err = ENOMEM;
5256 goto out;
5257 }
5258 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5259 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5260 seg, &nsegs, BUS_DMA_NOWAIT);
5261 if (nsegs > 20)
5262 panic("nsegs > 20");
5263 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5264 if (err) {
5265 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5266 DEVNAME(sc), totlen);
5267 m_freem(m);
5268 goto out;
5269 }
5270 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5271 cmd = mtod(m, struct iwx_device_cmd *);
5272 paddr = seg[0].ds_addr;
5273 } else {
5274 cmd = &ring->cmd[idx];
5275 paddr = txdata->cmd_paddr;
5276 }
5277
5278 memset(cmd, 0, sizeof(*cmd));
5279 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5280 cmd->hdr_wide.group_id = group_id;
5281 cmd->hdr_wide.qid = ring->qid;
5282 cmd->hdr_wide.idx = idx;
5283 cmd->hdr_wide.length = htole16(paylen);
5284 cmd->hdr_wide.version = iwx_cmd_version(code);
5285 data = cmd->data_wide;
5286
5287 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5288 if (hcmd->len[i] == 0)
5289 continue;
5290 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5291 off += hcmd->len[i];
5292 }
5293 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5294
5295 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5296 addr = htole64(paddr);
5297 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5298 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5299 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5300 paylen));
5301 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5302 IWX_FIRST_TB_SIZE);
5303 addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5304 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5305 desc->num_tbs = htole16(2);
5306 } else
5307 desc->num_tbs = htole16(1);
5308
5309 if (paylen > datasz) {
5310 bus_dmamap_sync(ring->data_dmat, txdata->map,
5311 BUS_DMASYNC_PREWRITE);
5312 } else {
5313 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5314 BUS_DMASYNC_PREWRITE);
5315 }
5316 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5317 BUS_DMASYNC_PREWRITE);
5318
5319 /* Kick command ring. */
5320 ring->queued++;
5321 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5322 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5323 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5324 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5325
5326 if (!async) {
5327 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5328 if (err == 0) {
5329 /* if hardware is no longer up, return error */
5330 if (generation != sc->sc_generation) {
5331 err = ENXIO;
5332 goto out;
5333 }
5334
5335 /* Response buffer will be freed in iwx_free_resp(). */
5336 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5337 sc->sc_cmd_resp_pkt[idx] = NULL;
5338 } else if (generation == sc->sc_generation) {
5339 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5340 sc->sc_cmd_resp_pkt[idx] = NULL;
5341 }
5342 }
5343 out:
5344 return err;
5345 }
5346
5347 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5348 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5349 uint16_t len, const void *data)
5350 {
5351 struct iwx_host_cmd cmd = {
5352 .id = id,
5353 .len = { len, },
5354 .data = { data, },
5355 .flags = flags,
5356 };
5357
5358 return iwx_send_cmd(sc, &cmd);
5359 }
5360
5361 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5362 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5363 uint32_t *status)
5364 {
5365 struct iwx_rx_packet *pkt;
5366 struct iwx_cmd_response *resp;
5367 int err, resp_len;
5368
5369 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5370 cmd->flags |= IWX_CMD_WANT_RESP;
5371 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5372
5373 err = iwx_send_cmd(sc, cmd);
5374 if (err)
5375 return err;
5376
5377 pkt = cmd->resp_pkt;
5378 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5379 return EIO;
5380
5381 resp_len = iwx_rx_packet_payload_len(pkt);
5382 if (resp_len != sizeof(*resp)) {
5383 iwx_free_resp(sc, cmd);
5384 return EIO;
5385 }
5386
5387 resp = (void *)pkt->data;
5388 *status = le32toh(resp->status);
5389 iwx_free_resp(sc, cmd);
5390 return err;
5391 }
5392
5393 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5394 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5395 const void *data, uint32_t *status)
5396 {
5397 struct iwx_host_cmd cmd = {
5398 .id = id,
5399 .len = { len, },
5400 .data = { data, },
5401 };
5402
5403 return iwx_send_cmd_status(sc, &cmd, status);
5404 }
5405
/*
 * Release the response buffer attached to a synchronous host command.
 * Must only be called for commands sent with IWX_CMD_WANT_RESP.
 */
static void
iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
	    ("hcmd flags !IWX_CMD_WANT_RESP"));
	free(hcmd->resp_pkt, M_DEVBUF);
	hcmd->resp_pkt = NULL;
}
5414
/*
 * Handle firmware acknowledgement of a host command: release the mbuf
 * of an oversized command, wake up the iwx_send_cmd() sleeper, and
 * decrement the command queue depth.
 */
static void
iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
{
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tx_data *data;

	if (qid != IWX_DQA_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[idx];

	/* Only commands staged in an mbuf (payload too large for the
	 * pre-allocated buffer) have data->m set. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the thread sleeping on this descriptor in iwx_send_cmd(). */
	wakeup(&ring->desc[idx]);

	DPRINTF(("%s: command 0x%x done\n", __func__, code));
	if (ring->queued == 0) {
		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
		    DEVNAME(sc), code));
	} else if (ring->queued > 0)
		ring->queued--;
}
5443
5444 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5445 iwx_fw_rateidx_ofdm(uint8_t rval)
5446 {
5447 /* Firmware expects indices which match our 11a rate set. */
5448 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5449 int i;
5450
5451 for (i = 0; i < rs->rs_nrates; i++) {
5452 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5453 return i;
5454 }
5455
5456 return 0;
5457 }
5458
5459 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5460 iwx_fw_rateidx_cck(uint8_t rval)
5461 {
5462 /* Firmware expects indices which match our 11b rate set. */
5463 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5464 int i;
5465
5466 for (i = 0; i < rs->rs_nrates; i++) {
5467 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5468 return i;
5469 }
5470
5471 return 0;
5472 }
5473
5474 static int
iwx_min_basic_rate(struct ieee80211com * ic)5475 iwx_min_basic_rate(struct ieee80211com *ic)
5476 {
5477 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5478 struct ieee80211_node *ni = vap->iv_bss;
5479 struct ieee80211_rateset *rs = &ni->ni_rates;
5480 struct ieee80211_channel *c = ni->ni_chan;
5481 int i, min, rval;
5482
5483 min = -1;
5484
5485 if (c == IEEE80211_CHAN_ANYC) {
5486 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5487 return -1;
5488 }
5489
5490 for (i = 0; i < rs->rs_nrates; i++) {
5491 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5492 continue;
5493 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5494 if (min == -1)
5495 min = rval;
5496 else if (rval < min)
5497 min = rval;
5498 }
5499
5500 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5501 if (min == -1)
5502 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5503
5504 return min;
5505 }
5506
5507 /*
5508 * Determine the Tx command flags and Tx rate+flags to use.
5509 * Return the selected Tx rate.
5510 */
5511 static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc * sc,struct iwx_node * in,struct ieee80211_frame * wh,uint16_t * flags,uint32_t * rate_n_flags,struct mbuf * m)5512 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5513 struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
5514 struct mbuf *m)
5515 {
5516 struct ieee80211com *ic = &sc->sc_ic;
5517 struct ieee80211_node *ni = &in->in_ni;
5518 struct ieee80211_rateset *rs = &ni->ni_rates;
5519 const struct iwx_rate *rinfo = NULL;
5520 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5521 int ridx = iwx_min_basic_rate(ic);
5522 int min_ridx, rate_flags;
5523 uint8_t rval;
5524
5525 /* We're in the process of clearing the node, no channel already */
5526 if (ridx == -1)
5527 return NULL;
5528
5529 min_ridx = iwx_rval2ridx(ridx);
5530
5531 *flags = 0;
5532
5533 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5534 type != IEEE80211_FC0_TYPE_DATA) {
5535 /* for non-data, use the lowest supported rate */
5536 ridx = min_ridx;
5537 *flags |= IWX_TX_FLAGS_CMD_RATE;
5538 } else if (ni->ni_flags & IEEE80211_NODE_VHT) {
5539 /* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
5540 ridx = iwx_min_basic_rate(ic);
5541 } else if (ni->ni_flags & IEEE80211_NODE_HT) {
5542 ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
5543 & ~IEEE80211_RATE_MCS];
5544 } else {
5545 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5546 & IEEE80211_RATE_VAL);
5547 ridx = iwx_rval2ridx(rval);
5548 if (ridx < min_ridx)
5549 ridx = min_ridx;
5550 }
5551
5552 if (m->m_flags & M_EAPOL)
5553 *flags |= IWX_TX_FLAGS_HIGH_PRI;
5554
5555 rinfo = &iwx_rates[ridx];
5556
5557 /*
5558 * Do not fill rate_n_flags if firmware controls the Tx rate.
5559 * For data frames we rely on Tx rate scaling in firmware by default.
5560 */
5561 if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
5562 *rate_n_flags = 0;
5563 return rinfo;
5564 }
5565
5566 /*
5567 * Forcing a CCK/OFDM legacy rate is important for management frames.
5568 * Association will only succeed if we do this correctly.
5569 */
5570
5571 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
5572 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
5573 rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5574 if (IWX_RIDX_IS_CCK(ridx)) {
5575 if (sc->sc_rate_n_flags_version >= 2)
5576 rate_flags |= IWX_RATE_MCS_CCK_MSK;
5577 else
5578 rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
5579 } else if (sc->sc_rate_n_flags_version >= 2)
5580 rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
5581
5582 rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
5583 & IEEE80211_RATE_VAL);
5584 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
5585 rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
5586
5587 if (sc->sc_rate_n_flags_version >= 2) {
5588 if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
5589 rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
5590 IWX_RATE_LEGACY_RATE_MSK);
5591 } else {
5592 rate_flags |= (iwx_fw_rateidx_cck(rval) &
5593 IWX_RATE_LEGACY_RATE_MSK);
5594 }
5595 } else
5596 rate_flags |= rinfo->plcp;
5597
5598 *rate_n_flags = rate_flags;
5599 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
5600 __func__, __LINE__,*flags);
5601 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
5602 __func__, __LINE__, *rate_n_flags);
5603
5604 if (sc->sc_debug & IWX_DEBUG_TXRATE)
5605 print_ratenflags(__func__, __LINE__,
5606 *rate_n_flags, sc->sc_rate_n_flags_version);
5607
5608 return rinfo;
5609 }
5610
/*
 * Update the byte-count table entry for a Tx slot.  The hardware reads
 * this table to learn how much frame data to fetch for the descriptor
 * at 'idx' — in bytes on AX210 and later, in dwords on earlier devices —
 * plus how many 64-byte chunks of the TFD itself to load into SRAM.
 */
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	/* Bytes of the TFD actually in use: header plus active TBs. */
	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
	    num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Publish the updated table entry to the device. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
5646
5647 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5648 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5649 {
5650 struct ieee80211com *ic = &sc->sc_ic;
5651 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5652 struct iwx_node *in = (void *)ni;
5653 struct iwx_tx_ring *ring;
5654 struct iwx_tx_data *data;
5655 struct iwx_tfh_tfd *desc;
5656 struct iwx_device_cmd *cmd;
5657 struct ieee80211_frame *wh;
5658 struct ieee80211_key *k = NULL;
5659 const struct iwx_rate *rinfo;
5660 uint64_t paddr;
5661 u_int hdrlen;
5662 uint32_t rate_n_flags;
5663 uint16_t num_tbs, flags, offload_assist = 0;
5664 int i, totlen, err, pad, qid;
5665 #define IWM_MAX_SCATTER 20
5666 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5667 int nsegs;
5668 struct mbuf *m1;
5669 size_t txcmd_size;
5670
5671 IWX_ASSERT_LOCKED(sc);
5672
5673 wh = mtod(m, struct ieee80211_frame *);
5674 hdrlen = ieee80211_anyhdrsize(wh);
5675
5676 qid = sc->first_data_qid;
5677
5678 /* Put QoS frames on the data queue which maps to their TID. */
5679 if (IEEE80211_QOS_HAS_SEQ(wh)) {
5680 uint16_t qos = ieee80211_gettid(wh);
5681 uint8_t tid = qos & IEEE80211_QOS_TID;
5682 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
5683
5684 /*
5685 * Note: we're currently putting all frames into one queue
5686 * except for A-MPDU queues. We should be able to choose
5687 * other WME queues but first we need to verify they've been
5688 * correctly setup for data.
5689 */
5690
5691 /*
5692 * Only QoS data goes into an A-MPDU queue;
5693 * don't add QoS null, the other data types, etc.
5694 */
5695 if (IEEE80211_AMPDU_RUNNING(tap) &&
5696 IEEE80211_IS_QOSDATA(wh) &&
5697 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5698 sc->aggqid[tid] != 0) {
5699 qid = sc->aggqid[tid];
5700 }
5701 }
5702
5703 ring = &sc->txq[qid];
5704 desc = &ring->desc[ring->cur];
5705 memset(desc, 0, sizeof(*desc));
5706 data = &ring->data[ring->cur];
5707
5708 cmd = &ring->cmd[ring->cur];
5709 cmd->hdr.code = IWX_TX_CMD;
5710 cmd->hdr.flags = 0;
5711 cmd->hdr.qid = ring->qid;
5712 cmd->hdr.idx = ring->cur;
5713
5714 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5715 if (rinfo == NULL)
5716 return EINVAL;
5717
5718 /* Offloaded sequence number assignment; non-AMPDU case */
5719 if ((m->m_flags & M_AMPDU_MPDU) == 0)
5720 ieee80211_output_seqno_assign(ni, -1, m);
5721
5722 /* Radiotap */
5723 if (ieee80211_radiotap_active_vap(vap)) {
5724 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5725
5726 tap->wt_flags = 0;
5727 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5728 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5729 tap->wt_rate = rinfo->rate;
5730 if (k != NULL)
5731 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5732 ieee80211_radiotap_tx(vap, m);
5733 }
5734
5735 /* Encrypt - CCMP via direct HW path, TKIP/WEP indirected openbsd-style for now */
5736 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5737 k = ieee80211_crypto_get_txkey(ni, m);
5738 if (k == NULL) {
5739 printf("%s: k is NULL!\n", __func__);
5740 m_freem(m);
5741 return (ENOBUFS);
5742 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5743 k->wk_keytsc++;
5744 } else {
5745 k->wk_cipher->ic_encap(k, m);
5746
5747 /* 802.11 headers may have moved */
5748 wh = mtod(m, struct ieee80211_frame *);
5749 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5750 }
5751 } else
5752 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5753
5754 totlen = m->m_pkthdr.len;
5755
5756 if (hdrlen & 3) {
5757 /* First segment length must be a multiple of 4. */
5758 pad = 4 - (hdrlen & 3);
5759 offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5760 } else
5761 pad = 0;
5762
5763 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5764 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5765 memset(tx, 0, sizeof(*tx));
5766 tx->len = htole16(totlen);
5767 tx->offload_assist = htole32(offload_assist);
5768 tx->flags = htole16(flags);
5769 tx->rate_n_flags = htole32(rate_n_flags);
5770 memcpy(tx->hdr, wh, hdrlen);
5771 txcmd_size = sizeof(*tx);
5772 } else {
5773 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5774 memset(tx, 0, sizeof(*tx));
5775 tx->len = htole16(totlen);
5776 tx->offload_assist = htole16(offload_assist);
5777 tx->flags = htole32(flags);
5778 tx->rate_n_flags = htole32(rate_n_flags);
5779 memcpy(tx->hdr, wh, hdrlen);
5780 txcmd_size = sizeof(*tx);
5781 }
5782
5783 /* Trim 802.11 header. */
5784 m_adj(m, hdrlen);
5785
5786 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5787 &nsegs, BUS_DMA_NOWAIT);
5788 if (err && err != EFBIG) {
5789 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5790 m_freem(m);
5791 return err;
5792 }
5793 if (err) {
5794 /* Too many DMA segments, linearize mbuf. */
5795 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5796 if (m1 == NULL) {
5797 printf("%s: could not defrag mbufs\n", __func__);
5798 m_freem(m);
5799 return (ENOBUFS);
5800 }
5801 m = m1;
5802 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5803 segs, &nsegs, BUS_DMA_NOWAIT);
5804 if (err) {
5805 printf("%s: can't map mbuf (error %d)\n", __func__,
5806 err);
5807 m_freem(m);
5808 return (err);
5809 }
5810 }
5811 data->m = m;
5812 data->in = in;
5813
5814 /* Fill TX descriptor. */
5815 num_tbs = 2 + nsegs;
5816 desc->num_tbs = htole16(num_tbs);
5817
5818 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5819 paddr = htole64(data->cmd_paddr);
5820 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5821 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5822 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5823 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5824 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5825 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5826 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5827
5828 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5829 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5830
5831 /* Other DMA segments are for data payload. */
5832 for (i = 0; i < nsegs; i++) {
5833 seg = &segs[i];
5834 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5835 paddr = htole64(seg->ds_addr);
5836 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5837 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5838 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5839 }
5840
5841 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5842 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5843 BUS_DMASYNC_PREWRITE);
5844 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5845 BUS_DMASYNC_PREWRITE);
5846
5847 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5848
5849 /* Kick TX ring. */
5850 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5851 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5852 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5853
5854 /* Mark TX ring as full if we reach a certain threshold. */
5855 if (++ring->queued > iwx_himark) {
5856 sc->qfullmsk |= 1 << ring->qid;
5857 }
5858
5859 sc->sc_tx_timer[ring->qid] = 15;
5860
5861 return 0;
5862 }
5863
/*
 * Flush all pending frames for the given TIDs of a station from the
 * firmware's TX path, then advance the affected driver TX rings up to
 * the read pointers reported back by the firmware.
 *
 * Returns 0 on success or an errno; EIO covers both a failed command
 * and a malformed or unexpected response.
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		/* We need the firmware's reply to learn the new read pointers. */
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* The reply must be about the station we asked for. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/*
	 * For each queue the firmware flushed, reclaim completed frames up
	 * to the reported read pointer.  Queues we don't track, or whose
	 * TID disagrees with our bookkeeping, are skipped defensively.
	 */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	/* Always release the response buffer allocated for WANT_RESP. */
	iwx_free_resp(sc, &hcmd);
	return err;
}
5934
5935 #define IWX_FLUSH_WAIT_MS 2000
5936
5937 static int
iwx_drain_sta(struct iwx_softc * sc,struct iwx_node * in,int drain)5938 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5939 {
5940 struct iwx_add_sta_cmd cmd;
5941 int err;
5942 uint32_t status;
5943
5944 memset(&cmd, 0, sizeof(cmd));
5945 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5946 in->in_color));
5947 cmd.sta_id = IWX_STATION_ID;
5948 cmd.add_modify = IWX_STA_MODE_MODIFY;
5949 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5950 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5951
5952 status = IWX_ADD_STA_SUCCESS;
5953 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5954 sizeof(cmd), &cmd, &status);
5955 if (err) {
5956 printf("%s: could not update sta (error %d)\n",
5957 DEVNAME(sc), err);
5958 return err;
5959 }
5960
5961 switch (status & IWX_ADD_STA_STATUS_MASK) {
5962 case IWX_ADD_STA_SUCCESS:
5963 break;
5964 default:
5965 err = EIO;
5966 printf("%s: Couldn't %s draining for station\n",
5967 DEVNAME(sc), drain ? "enable" : "disable");
5968 break;
5969 }
5970
5971 return err;
5972 }
5973
5974 static int
iwx_flush_sta(struct iwx_softc * sc,struct iwx_node * in)5975 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5976 {
5977 int err;
5978
5979 IWX_ASSERT_LOCKED(sc);
5980
5981 sc->sc_flags |= IWX_FLAG_TXFLUSH;
5982
5983 err = iwx_drain_sta(sc, in, 1);
5984 if (err)
5985 goto done;
5986
5987 err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5988 if (err) {
5989 printf("%s: could not flush Tx path (error %d)\n",
5990 DEVNAME(sc), err);
5991 goto done;
5992 }
5993
5994 /*
5995 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a nope in the
5996 * fc drive rand has has been replaced in OpenBSD.
5997 */
5998
5999 err = iwx_drain_sta(sc, in, 0);
6000 done:
6001 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6002 return err;
6003 }
6004
6005 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
6006
6007 static int
iwx_beacon_filter_send_cmd(struct iwx_softc * sc,struct iwx_beacon_filter_cmd * cmd)6008 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6009 struct iwx_beacon_filter_cmd *cmd)
6010 {
6011 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6012 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6013 }
6014
6015 static int
iwx_update_beacon_abort(struct iwx_softc * sc,struct iwx_node * in,int enable)6016 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6017 {
6018 struct iwx_beacon_filter_cmd cmd = {
6019 IWX_BF_CMD_CONFIG_DEFAULTS,
6020 .bf_enable_beacon_filter = htole32(1),
6021 .ba_enable_beacon_abort = htole32(enable),
6022 };
6023
6024 if (!sc->sc_bf.bf_enabled)
6025 return 0;
6026
6027 sc->sc_bf.ba_enabled = enable;
6028 return iwx_beacon_filter_send_cmd(sc, &cmd);
6029 }
6030
6031 static void
iwx_power_build_cmd(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_power_cmd * cmd)6032 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6033 struct iwx_mac_power_cmd *cmd)
6034 {
6035 struct ieee80211com *ic = &sc->sc_ic;
6036 struct ieee80211_node *ni = &in->in_ni;
6037 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6038 int dtim_period, dtim_msec, keep_alive;
6039
6040 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6041 in->in_color));
6042 if (vap->iv_dtim_period)
6043 dtim_period = vap->iv_dtim_period;
6044 else
6045 dtim_period = 1;
6046
6047 /*
6048 * Regardless of power management state the driver must set
6049 * keep alive period. FW will use it for sending keep alive NDPs
6050 * immediately after association. Check that keep alive period
6051 * is at least 3 * DTIM.
6052 */
6053 dtim_msec = dtim_period * ni->ni_intval;
6054 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6055 keep_alive = roundup(keep_alive, 1000) / 1000;
6056 cmd->keep_alive_seconds = htole16(keep_alive);
6057
6058 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6059 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6060 }
6061
6062 static int
iwx_power_mac_update_mode(struct iwx_softc * sc,struct iwx_node * in)6063 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6064 {
6065 int err;
6066 int ba_enable;
6067 struct iwx_mac_power_cmd cmd;
6068
6069 memset(&cmd, 0, sizeof(cmd));
6070
6071 iwx_power_build_cmd(sc, in, &cmd);
6072
6073 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6074 sizeof(cmd), &cmd);
6075 if (err != 0)
6076 return err;
6077
6078 ba_enable = !!(cmd.flags &
6079 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6080 return iwx_update_beacon_abort(sc, in, ba_enable);
6081 }
6082
6083 static int
iwx_power_update_device(struct iwx_softc * sc)6084 iwx_power_update_device(struct iwx_softc *sc)
6085 {
6086 struct iwx_device_power_cmd cmd = { };
6087 struct ieee80211com *ic = &sc->sc_ic;
6088
6089 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6090 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6091
6092 return iwx_send_cmd_pdu(sc,
6093 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6094 }
6095 #if 0
/*
 * Enable firmware beacon filtering, preserving the currently configured
 * beacon-abort setting.  Currently compiled out via the surrounding
 * #if 0; kept as the counterpart to iwx_disable_beacon_filter().
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
6112 #endif
6113 static int
iwx_disable_beacon_filter(struct iwx_softc * sc)6114 iwx_disable_beacon_filter(struct iwx_softc *sc)
6115 {
6116 struct iwx_beacon_filter_cmd cmd;
6117 int err;
6118
6119 memset(&cmd, 0, sizeof(cmd));
6120
6121 err = iwx_beacon_filter_send_cmd(sc, &cmd);
6122 if (err == 0)
6123 sc->sc_bf.bf_enabled = 0;
6124
6125 return err;
6126 }
6127
/*
 * Add (or, with update != 0, modify) our station in the firmware.
 * Builds the ADD_STA command from the node's HT/VHT capabilities:
 * MIMO use, channel width (40/80 MHz), maximum A-MPDU size and the
 * minimum MPDU start spacing ("density").
 *
 * Returns 0 on success, an errno from the command path, or EIO when
 * the firmware reports a non-success status.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* Monitor mode uses a dedicated general-purpose station slot. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only needed when first adding the station. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	/* We always (re)program the channel-width and MIMO bits below. */
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				/*
				 * For plain HT, MCS indices above 7 imply
				 * a second spatial stream.
				 */
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		/*
		 * NOTE(review): the IEEE80211_NODE_HT test below is
		 * redundant inside this branch; kept as-is.
		 */
		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Derive max A-MPDU size from the HT parameters. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Translate the peer's MPDU density into firmware flags. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6248
6249 static int
iwx_rm_sta_cmd(struct iwx_softc * sc,struct iwx_node * in)6250 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6251 {
6252 struct ieee80211com *ic = &sc->sc_ic;
6253 struct iwx_rm_sta_cmd rm_sta_cmd;
6254 int err;
6255
6256 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6257 panic("sta already removed");
6258
6259 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6260 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6261 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6262 else
6263 rm_sta_cmd.sta_id = IWX_STATION_ID;
6264
6265 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6266 &rm_sta_cmd);
6267
6268 return err;
6269 }
6270
/*
 * Fully remove our station from the firmware: flush its TX path,
 * explicitly tear down the management and aggregation queues (required
 * by the newer SCD_QUEUE_CONFIG API), send REMOVE_STA, and reset all
 * related driver-side aggregation/block-ack state.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Disable every aggregation queue that is still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all block-ack and aggregation bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6343
6344 static uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc * sc,struct iwx_scan_channel_cfg_umac * chan,size_t chan_nitems,int n_ssids,uint32_t channel_cfg_flags)6345 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6346 struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6347 int n_ssids, uint32_t channel_cfg_flags)
6348 {
6349 struct ieee80211com *ic = &sc->sc_ic;
6350 struct ieee80211_scan_state *ss = ic->ic_scan;
6351 struct ieee80211_channel *c;
6352 uint8_t nchan;
6353 int j;
6354
6355 for (nchan = j = 0;
6356 j < ss->ss_last &&
6357 nchan < sc->sc_capa_n_scan_channels;
6358 j++) {
6359 uint8_t channel_num;
6360
6361 c = ss->ss_chans[j];
6362 channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6363 if (isset(sc->sc_ucode_api,
6364 IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6365 chan->v2.channel_num = channel_num;
6366 if (IEEE80211_IS_CHAN_2GHZ(c))
6367 chan->v2.band = IWX_PHY_BAND_24;
6368 else
6369 chan->v2.band = IWX_PHY_BAND_5;
6370 chan->v2.iter_count = 1;
6371 chan->v2.iter_interval = 0;
6372 } else {
6373 chan->v1.channel_num = channel_num;
6374 chan->v1.iter_count = 1;
6375 chan->v1.iter_interval = htole16(0);
6376 }
6377 chan->flags |= htole32(channel_cfg_flags);
6378 chan++;
6379 nchan++;
6380 }
6381
6382 return nchan;
6383 }
6384
6385 static int
iwx_fill_probe_req(struct iwx_softc * sc,struct iwx_scan_probe_req * preq)6386 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6387 {
6388 struct ieee80211com *ic = &sc->sc_ic;
6389 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6390 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6391 struct ieee80211_rateset *rs;
6392 size_t remain = sizeof(preq->buf);
6393 uint8_t *frm, *pos;
6394
6395 memset(preq, 0, sizeof(*preq));
6396
6397 if (remain < sizeof(*wh) + 2)
6398 return ENOBUFS;
6399
6400 /*
6401 * Build a probe request frame. Most of the following code is a
6402 * copy & paste of what is done in net80211.
6403 */
6404 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6405 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6406 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6407 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6408 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6409 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6410 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6411 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6412
6413 frm = (uint8_t *)(wh + 1);
6414 *frm++ = IEEE80211_ELEMID_SSID;
6415 *frm++ = 0;
6416 /* hardware inserts SSID */
6417
6418 /* Tell the firmware where the MAC header is. */
6419 preq->mac_header.offset = 0;
6420 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6421 remain -= frm - (uint8_t *)wh;
6422
6423 /* Fill in 2GHz IEs and tell firmware where they are. */
6424 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6425 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6426 if (remain < 4 + rs->rs_nrates)
6427 return ENOBUFS;
6428 } else if (remain < 2 + rs->rs_nrates)
6429 return ENOBUFS;
6430 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6431 pos = frm;
6432 frm = ieee80211_add_rates(frm, rs);
6433 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6434 frm = ieee80211_add_xrates(frm, rs);
6435 remain -= frm - pos;
6436
6437 if (isset(sc->sc_enabled_capa,
6438 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6439 if (remain < 3)
6440 return ENOBUFS;
6441 *frm++ = IEEE80211_ELEMID_DSPARMS;
6442 *frm++ = 1;
6443 *frm++ = 0;
6444 remain -= 3;
6445 }
6446 preq->band_data[0].len = htole16(frm - pos);
6447
6448 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6449 /* Fill in 5GHz IEs. */
6450 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6451 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6452 if (remain < 4 + rs->rs_nrates)
6453 return ENOBUFS;
6454 } else if (remain < 2 + rs->rs_nrates)
6455 return ENOBUFS;
6456 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6457 pos = frm;
6458 frm = ieee80211_add_rates(frm, rs);
6459 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6460 frm = ieee80211_add_xrates(frm, rs);
6461 preq->band_data[1].len = htole16(frm - pos);
6462 remain -= frm - pos;
6463 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6464 if (remain < 14)
6465 return ENOBUFS;
6466 frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6467 remain -= frm - pos;
6468 preq->band_data[1].len = htole16(frm - pos);
6469 }
6470 }
6471
6472 /* Send 11n IEs on both 2GHz and 5GHz bands. */
6473 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6474 pos = frm;
6475 if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6476 if (remain < 28)
6477 return ENOBUFS;
6478 frm = ieee80211_add_htcap(frm, vap->iv_bss);
6479 /* XXX add WME info? */
6480 remain -= frm - pos;
6481 }
6482
6483 preq->common_data.len = htole16(frm - pos);
6484
6485 return 0;
6486 }
6487
6488 static int
iwx_config_umac_scan_reduced(struct iwx_softc * sc)6489 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6490 {
6491 struct iwx_scan_config scan_cfg;
6492 struct iwx_host_cmd hcmd = {
6493 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6494 .len[0] = sizeof(scan_cfg),
6495 .data[0] = &scan_cfg,
6496 .flags = 0,
6497 };
6498 int cmdver;
6499
6500 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6501 printf("%s: firmware does not support reduced scan config\n",
6502 DEVNAME(sc));
6503 return ENOTSUP;
6504 }
6505
6506 memset(&scan_cfg, 0, sizeof(scan_cfg));
6507
6508 /*
6509 * SCAN_CFG version >= 5 implies that the broadcast
6510 * STA ID field is deprecated.
6511 */
6512 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6513 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6514 scan_cfg.bcast_sta_id = 0xff;
6515
6516 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6517 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6518
6519 return iwx_send_cmd(sc, &hcmd);
6520 }
6521
6522 static uint16_t
iwx_scan_umac_flags_v2(struct iwx_softc * sc,int bgscan)6523 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6524 {
6525 struct ieee80211com *ic = &sc->sc_ic;
6526 struct ieee80211_scan_state *ss = ic->ic_scan;
6527 uint16_t flags = 0;
6528
6529 if (ss->ss_nssid == 0) {
6530 DPRINTF(("%s: Passive scan started\n", __func__));
6531 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6532 }
6533
6534 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6535 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6536 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6537
6538 return flags;
6539 }
6540
6541 #define IWX_SCAN_DWELL_ACTIVE 10
6542 #define IWX_SCAN_DWELL_PASSIVE 110
6543
6544 /* adaptive dwell max budget time [TU] for full scan */
6545 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6546 /* adaptive dwell max budget time [TU] for directed scan */
6547 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6548 /* adaptive dwell default high band APs number */
6549 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6550 /* adaptive dwell default low band APs number */
6551 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6552 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6553 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6554 /* adaptive dwell number of APs override for p2p friendly GO channels */
6555 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6556 /* adaptive dwell number of APs override for social channels */
6557 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6558
6559 static void
iwx_scan_umac_dwell_v10(struct iwx_softc * sc,struct iwx_scan_general_params_v10 * general_params,int bgscan)6560 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6561 struct iwx_scan_general_params_v10 *general_params, int bgscan)
6562 {
6563 uint32_t suspend_time, max_out_time;
6564 uint8_t active_dwell, passive_dwell;
6565
6566 active_dwell = IWX_SCAN_DWELL_ACTIVE;
6567 passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6568
6569 general_params->adwell_default_social_chn =
6570 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6571 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6572 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6573
6574 if (bgscan)
6575 general_params->adwell_max_budget =
6576 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6577 else
6578 general_params->adwell_max_budget =
6579 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6580
6581 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6582 if (bgscan) {
6583 max_out_time = htole32(120);
6584 suspend_time = htole32(120);
6585 } else {
6586 max_out_time = htole32(0);
6587 suspend_time = htole32(0);
6588 }
6589 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6590 htole32(max_out_time);
6591 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6592 htole32(suspend_time);
6593 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6594 htole32(max_out_time);
6595 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6596 htole32(suspend_time);
6597
6598 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6599 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6600 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6601 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6602 }
6603
/*
 * Fill the "general params" section of a v14 UMAC scan request:
 * dwell timings (via iwx_scan_umac_dwell_v10) plus the general scan
 * flags and fragmentation settings.
 */
static void
iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
    struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
{
	iwx_scan_umac_dwell_v10(sc, gp, bgscan);

	gp->flags = htole16(gen_flags);

	/* Fragmented scans use up to 3 fragments per LMAC. */
	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;

	/* Scans are always issued on behalf of MAC id 0 here. */
	gp->scan_start_mac_id = 0;
}
6619
/*
 * Fill the v6 channel-params section of a UMAC scan request: channel
 * ordering flag, the channel list itself, and the adaptive-dwell AP
 * count overrides for P2P-GO-friendly and social channels.
 */
static void
iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
    struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
    int n_ssid)
{
	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;

	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);

	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
}
6633
6634 static int
iwx_umac_scan_v14(struct iwx_softc * sc,int bgscan)6635 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
6636 {
6637 struct ieee80211com *ic = &sc->sc_ic;
6638 struct ieee80211_scan_state *ss = ic->ic_scan;
6639 struct iwx_host_cmd hcmd = {
6640 .id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
6641 .len = { 0, },
6642 .data = { NULL, },
6643 .flags = 0,
6644 };
6645 struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
6646 struct iwx_scan_req_params_v14 *scan_p;
6647 int err, async = bgscan, n_ssid = 0;
6648 uint16_t gen_flags;
6649 uint32_t bitmap_ssid = 0;
6650
6651 IWX_ASSERT_LOCKED(sc);
6652
6653 bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));
6654
6655 scan_p = &cmd->scan_params;
6656
6657 cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6658 cmd->uid = htole32(0);
6659
6660 gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
6661 iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
6662 gen_flags, bgscan);
6663
6664 scan_p->periodic_params.schedule[0].interval = htole16(0);
6665 scan_p->periodic_params.schedule[0].iter_count = 1;
6666
6667 err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
6668 if (err) {
6669 printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
6670 err);
6671 return err;
6672 }
6673
6674 for (int i=0; i < ss->ss_nssid; i++) {
6675 scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
6676 scan_p->probe_params.direct_scan[i].len =
6677 MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
6678 DPRINTF(("%s: Active scan started for ssid ", __func__));
6679 memcpy(scan_p->probe_params.direct_scan[i].ssid,
6680 ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
6681 n_ssid++;
6682 bitmap_ssid |= (1 << i);
6683 }
6684 DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));
6685
6686 iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
6687 n_ssid);
6688
6689 hcmd.len[0] = sizeof(*cmd);
6690 hcmd.data[0] = (void *)cmd;
6691 hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
6692
6693 err = iwx_send_cmd(sc, &hcmd);
6694 return err;
6695 }
6696
6697 static void
iwx_mcc_update(struct iwx_softc * sc,struct iwx_mcc_chub_notif * notif)6698 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6699 {
6700 char alpha2[3];
6701
6702 snprintf(alpha2, sizeof(alpha2), "%c%c",
6703 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6704
6705 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6706 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6707
6708 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6709 }
6710
6711 uint8_t
iwx_ridx2rate(struct ieee80211_rateset * rs,int ridx)6712 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6713 {
6714 int i;
6715 uint8_t rval;
6716
6717 for (i = 0; i < rs->rs_nrates; i++) {
6718 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6719 if (rval == iwx_rates[ridx].rate)
6720 return rs->rs_rates[i];
6721 }
6722
6723 return 0;
6724 }
6725
6726 static int
iwx_rval2ridx(int rval)6727 iwx_rval2ridx(int rval)
6728 {
6729 int ridx;
6730
6731 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6732 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6733 continue;
6734 if (rval == iwx_rates[ridx].rate)
6735 break;
6736 }
6737
6738 return ridx;
6739 }
6740
/*
 * Compute the CCK and OFDM basic-rate bitmaps used by firmware to choose
 * ACK/control-response rates for this node's BSS.  Starting from the basic
 * rates advertised in the node's rate set, mandatory lower rates are added
 * as required by 802.11-2007 section 9.6 (see comment below).
 *
 * Results are returned through *cck_rates and *ofdm_rates; the OFDM bitmap
 * is shifted so that bit 0 corresponds to IWX_FIRST_OFDM_RATE.
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2 GHz (or when the channel is unknown). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6826
6827 static void
iwx_mac_ctxt_cmd_common(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_ctx_cmd * cmd,uint32_t action)6828 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6829 struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6830 {
6831 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6832 struct ieee80211com *ic = &sc->sc_ic;
6833 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6834 struct ieee80211_node *ni = vap->iv_bss;
6835 int cck_ack_rates, ofdm_ack_rates;
6836
6837 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6838 in->in_color));
6839 cmd->action = htole32(action);
6840
6841 if (action == IWX_FW_CTXT_ACTION_REMOVE)
6842 return;
6843
6844 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6845 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6846 else if (ic->ic_opmode == IEEE80211_M_STA)
6847 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6848 else
6849 panic("unsupported operating mode %d", ic->ic_opmode);
6850 cmd->tsf_id = htole32(IWX_TSF_ID_A);
6851
6852 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6853 DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6854 ether_sprintf(cmd->node_addr)));
6855 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6856 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6857 return;
6858 }
6859
6860 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6861 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6862 ether_sprintf(cmd->bssid_addr)));
6863 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6864 cmd->cck_rates = htole32(cck_ack_rates);
6865 cmd->ofdm_rates = htole32(ofdm_ack_rates);
6866
6867 cmd->cck_short_preamble
6868 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6869 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6870 cmd->short_slot
6871 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6872 ? IWX_MAC_FLG_SHORT_SLOT : 0);
6873
6874 struct chanAccParams chp;
6875 ieee80211_wme_vap_getparams(vap, &chp);
6876
6877 for (int i = 0; i < WME_NUM_AC; i++) {
6878 int txf = iwx_ac_to_tx_fifo[i];
6879 cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6880 cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6881 cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6882 cmd->ac[txf].fifos_mask = (1 << txf);
6883 cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6884
6885 cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6886 }
6887
6888 if (ni->ni_flags & IEEE80211_NODE_QOS) {
6889 DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6890 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6891 }
6892
6893 if (ni->ni_flags & IEEE80211_NODE_HT) {
6894 switch (vap->iv_curhtprotmode) {
6895 case IEEE80211_HTINFO_OPMODE_PURE:
6896 break;
6897 case IEEE80211_HTINFO_OPMODE_PROTOPT:
6898 case IEEE80211_HTINFO_OPMODE_MIXED:
6899 cmd->protection_flags |=
6900 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6901 IWX_MAC_PROT_FLG_FAT_PROT);
6902 break;
6903 case IEEE80211_HTINFO_OPMODE_HT20PR:
6904 if (in->in_phyctxt &&
6905 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6906 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6907 cmd->protection_flags |=
6908 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6909 IWX_MAC_PROT_FLG_FAT_PROT);
6910 }
6911 break;
6912 default:
6913 break;
6914 }
6915 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6916 DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6917 }
6918
6919 if (ic->ic_flags & IEEE80211_F_USEPROT)
6920 cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6921 cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6922 #undef IWX_EXP2
6923 }
6924
/*
 * Fill the station-specific portion of a MAC context command: association
 * state, beacon interval, DTIM timing, listen interval and association ID.
 * DTIM time/TSF fields are only meaningful when 'assoc' is nonzero.
 */
static void
iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t dtim_off;
	uint64_t tsf;
	int dtim_period;

	/* Offset of the next DTIM relative to the beacon TSF, in usec (TU). */
	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
	tsf = le64toh(ni->ni_tstamp.tsf);
	dtim_period = vap->iv_dtim_period;

	sta->is_assoc = htole32(assoc);

	if (assoc) {
		sta->dtim_time = htole32(tsf + dtim_off);
		sta->dtim_tsf = htole64(tsf + dtim_off);
		// XXX: unset in iwm
		sta->assoc_beacon_arrive_time = 0;
	}
	sta->bi = htole32(ni->ni_intval);
	sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	sta->data_policy = htole32(0);
	/* NOTE(review): listen interval hard-coded to 10 beacon intervals. */
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
}
6954
/*
 * Send a MAC context command (ADD/MODIFY/REMOVE) to firmware.
 *
 * Panics if the requested action is inconsistent with the driver's
 * IWX_FLAG_MAC_ACTIVE bookkeeping (double add / double remove).
 * For monitor mode the filter flags are widened to capture everything;
 * otherwise beacons are let through until we are fully associated.
 * Returns 0 on success or an error from iwx_send_cmd_pdu().
 */
static int
iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
    int assoc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_mac_ctx_cmd cmd;
	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);

	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("MAC already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("MAC already removed");

	memset(&cmd, 0, sizeof(cmd));

	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);

	/* REMOVE needs only the common header fields. */
	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
		    sizeof(cmd), &cmd);
	}

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
		    IWX_MAC_FILTER_ACCEPT_GRP |
		    IWX_MAC_FILTER_IN_BEACON |
		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
		    IWX_MAC_FILTER_IN_CRC32);
	// XXX: dtim period is in vap
	} else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
		/*
		 * Allow beacons to pass through as long as we are not
		 * associated or we do not have dtim period information.
		 */
		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
	}
	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
6996
6997 static int
iwx_clear_statistics(struct iwx_softc * sc)6998 iwx_clear_statistics(struct iwx_softc *sc)
6999 {
7000 struct iwx_statistics_cmd scmd = {
7001 .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7002 };
7003 struct iwx_host_cmd cmd = {
7004 .id = IWX_STATISTICS_CMD,
7005 .len[0] = sizeof(scmd),
7006 .data[0] = &scmd,
7007 .flags = IWX_CMD_WANT_RESP,
7008 .resp_pkt_len = sizeof(struct iwx_notif_statistics),
7009 };
7010 int err;
7011
7012 err = iwx_send_cmd(sc, &cmd);
7013 if (err)
7014 return err;
7015
7016 iwx_free_resp(sc, &cmd);
7017 return 0;
7018 }
7019
/*
 * Start a foreground UMAC scan.  Returns 0 on success or the error from
 * iwx_umac_scan_v14().
 */
static int
iwx_scan(struct iwx_softc *sc)
{
	int err;

	err = iwx_umac_scan_v14(sc, 0);
	if (err)
		printf("%s: could not initiate scan\n", DEVNAME(sc));

	return err;
}
7032
7033 static int
iwx_bgscan(struct ieee80211com * ic)7034 iwx_bgscan(struct ieee80211com *ic)
7035 {
7036 struct iwx_softc *sc = ic->ic_softc;
7037 int err;
7038
7039 err = iwx_umac_scan_v14(sc, 1);
7040 if (err) {
7041 printf("%s: could not initiate scan\n", DEVNAME(sc));
7042 return err;
7043 }
7044 return 0;
7045 }
7046
7047 static int
iwx_enable_mgmt_queue(struct iwx_softc * sc)7048 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7049 {
7050 int err;
7051
7052 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7053
7054 /*
7055 * Non-QoS frames use the "MGMT" TID and queue.
7056 * Other TIDs and data queues are reserved for QoS data frames.
7057 */
7058 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7059 IWX_MGMT_TID, IWX_TX_RING_COUNT);
7060 if (err) {
7061 printf("%s: could not enable Tx queue %d (error %d)\n",
7062 DEVNAME(sc), sc->first_data_qid, err);
7063 return err;
7064 }
7065
7066 return 0;
7067 }
7068
7069 static int
iwx_disable_mgmt_queue(struct iwx_softc * sc)7070 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7071 {
7072 int err, cmd_ver;
7073
7074 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7075 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7076 IWX_SCD_QUEUE_CONFIG_CMD);
7077 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7078 return 0;
7079
7080 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7081
7082 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7083 IWX_MGMT_TID);
7084 if (err) {
7085 printf("%s: could not disable Tx queue %d (error %d)\n",
7086 DEVNAME(sc), sc->first_data_qid, err);
7087 return err;
7088 }
7089
7090 return 0;
7091 }
7092
7093 static int
iwx_rs_rval2idx(uint8_t rval)7094 iwx_rs_rval2idx(uint8_t rval)
7095 {
7096 /* Firmware expects indices which match our 11g rate set. */
7097 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7098 int i;
7099
7100 for (i = 0; i < rs->rs_nrates; i++) {
7101 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7102 return i;
7103 }
7104
7105 return -1;
7106 }
7107
7108 static uint16_t
iwx_rs_ht_rates(struct iwx_softc * sc,struct ieee80211_node * ni,int rsidx)7109 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7110 {
7111 uint16_t htrates = 0;
7112 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7113 int i;
7114
7115 if (rsidx == IEEE80211_HT_RATESET_SISO) {
7116 for (i = 0; i < htrs->rs_nrates; i++) {
7117 if (htrs->rs_rates[i] <= 7)
7118 htrates |= (1 << htrs->rs_rates[i]);
7119 }
7120 } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7121 for (i = 0; i < htrs->rs_nrates; i++) {
7122 if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7123 htrates |= (1 << (htrs->rs_rates[i] - 8));
7124 }
7125 } else
7126 panic(("iwx_rs_ht_rates"));
7127
7128 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7129 "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7130
7131 return htrates;
7132 }
7133
7134 uint16_t
iwx_rs_vht_rates(struct iwx_softc * sc,struct ieee80211_node * ni,int num_ss)7135 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7136 {
7137 uint16_t rx_mcs;
7138 int max_mcs = -1;
7139 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
7140 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
7141 rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7142 IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7143 IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7144
7145 switch (rx_mcs) {
7146 case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7147 break;
7148 case IEEE80211_VHT_MCS_SUPPORT_0_7:
7149 max_mcs = 7;
7150 break;
7151 case IEEE80211_VHT_MCS_SUPPORT_0_8:
7152 max_mcs = 8;
7153 break;
7154 case IEEE80211_VHT_MCS_SUPPORT_0_9:
7155 /* Disable VHT MCS 9 for 20MHz-only stations. */
7156 if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7157 max_mcs = 8;
7158 else
7159 max_mcs = 9;
7160 break;
7161 default:
7162 /* Should not happen; Values above cover the possible range. */
7163 panic("invalid VHT Rx MCS value %u", rx_mcs);
7164 }
7165
7166 return ((1 << (max_mcs + 1)) - 1);
7167 }
7168
/*
 * Configure firmware rate selection using version 3 of the TLC config
 * command.  This path has not been validated on FreeBSD yet, so it
 * currently panics (see #if 1 below); the #else branch preserves the
 * intended implementation for when it is enabled.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Legacy (non-HT) rates, indexed per the standard 11g rate set. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the highest supported PHY mode and its MCS maps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Maximum channel width, from the PHY context. */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU sizes per 802.11 limits (VHT vs. HT). */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7240
/*
 * Configure firmware rate selection using version 4 of the TLC config
 * command: legacy rate bitmap, HT/VHT MCS maps per spatial stream,
 * maximum channel width, chain mask, A-MSDU limit and SGI support.
 * Sent asynchronously; returns 0 or an errno from iwx_send_cmd_pdu().
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Legacy (non-HT) rates, indexed per the standard 11g rate set. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the highest supported PHY mode and fill its MCS maps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Channel width taken from the node's channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Maximum A-MSDU sizes per 802.11 limits (VHT vs. HT). */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7339
7340 static int
iwx_rs_init(struct iwx_softc * sc,struct iwx_node * in)7341 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7342 {
7343 int cmd_ver;
7344
7345 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7346 IWX_TLC_MNG_CONFIG_CMD);
7347 if (cmd_ver == 4)
7348 return iwx_rs_init_v4(sc, in);
7349 else
7350 return iwx_rs_init_v3(sc, in);
7351 }
7352
7353
7354 /**
7355 * @brief Turn the given TX rate control notification into an ieee80211_node_txrate
7356 *
7357 * This populates the given txrate node with the TX rate control notification.
7358 *
7359 * @param sc driver softc
7360 * @param notif firmware notification
7361 * @param ni ieee80211_node update
7362 * @returns true if updated, false if not
7363 */
7364 static bool
iwx_rs_update_node_txrate(struct iwx_softc * sc,const struct iwx_tlc_update_notif * notif,struct ieee80211_node * ni)7365 iwx_rs_update_node_txrate(struct iwx_softc *sc,
7366 const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
7367 {
7368 struct ieee80211com *ic = &sc->sc_ic;
7369 /* XXX TODO: create an inline function in if_iwxreg.h? */
7370 static int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
7371 static int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };
7372
7373 uint32_t rate_n_flags;
7374 uint32_t type;
7375
7376 /* Extract the rate and command version */
7377 rate_n_flags = le32toh(notif->rate);
7378
7379 if (sc->sc_rate_n_flags_version != 2) {
7380 net80211_ic_printf(ic,
7381 "%s: unsupported rate_n_flags version (%d)\n",
7382 __func__,
7383 sc->sc_rate_n_flags_version);
7384 return (false);
7385 }
7386
7387 if (sc->sc_debug & IWX_DEBUG_TXRATE)
7388 print_ratenflags(__func__, __LINE__,
7389 rate_n_flags, sc->sc_rate_n_flags_version);
7390
7391 type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7392 switch (type) {
7393 case IWX_RATE_MCS_CCK_MSK:
7394 ieee80211_node_set_txrate_dot11rate(ni,
7395 cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
7396 return (true);
7397 case IWX_RATE_MCS_LEGACY_OFDM_MSK:
7398 ieee80211_node_set_txrate_dot11rate(ni,
7399 ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
7400 return (true);
7401 case IWX_RATE_MCS_HT_MSK:
7402 /*
7403 * TODO: the current API doesn't include channel width
7404 * and other flags, so we can't accurately store them yet!
7405 *
7406 * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
7407 * >> IWX_RATE_MCS_CHAN_WIDTH_POS)
7408 * LDPC: (flags & (1 << 16))
7409 */
7410 ieee80211_node_set_txrate_ht_mcsrate(ni,
7411 IWX_RATE_HT_MCS_INDEX(rate_n_flags));
7412 return (true);
7413 case IWX_RATE_MCS_VHT_MSK:
7414 /* TODO: same comment on channel width, etc above */
7415 ieee80211_node_set_txrate_vht_rate(ni,
7416 IWX_RATE_VHT_MCS_CODE(rate_n_flags),
7417 IWX_RATE_VHT_MCS_NSS(rate_n_flags));
7418 return (true);
7419 default:
7420 net80211_ic_printf(ic,
7421 "%s: unsupported chosen rate type in "
7422 "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
7423 type >> IWX_RATE_MCS_MOD_TYPE_POS);
7424 return (false);
7425 }
7426
7427 /* Default: if we get here, we didn't successfully update anything */
7428 return (false);
7429 }
7430
/**
 * @brief Process a firmware rate control update and update net80211.
 *
 * Since firmware is doing rate control, this just needs to update
 * the txrate in the ieee80211_node entry.
 */
static void
iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	/* XXX TODO: get a node ref! */
	struct ieee80211_node *ni = (void *)vap->iv_bss;

	/*
	 * For now the iwx driver only supports a single vdev with a single
	 * node; it doesn't yet support ibss/hostap/multiple vdevs.
	 */
	/* Ignore notifications for other stations or without a rate update. */
	if (notif->sta_id != IWX_STATION_ID ||
	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
		return;

	iwx_rs_update_node_txrate(sc, notif, ni);
}
7455
7456 static int
iwx_phy_send_rlc(struct iwx_softc * sc,struct iwx_phy_ctxt * phyctxt,uint8_t chains_static,uint8_t chains_dynamic)7457 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7458 uint8_t chains_static, uint8_t chains_dynamic)
7459 {
7460 struct iwx_rlc_config_cmd cmd;
7461 uint32_t cmd_id;
7462 uint8_t active_cnt, idle_cnt;
7463
7464 memset(&cmd, 0, sizeof(cmd));
7465
7466 idle_cnt = chains_static;
7467 active_cnt = chains_dynamic;
7468
7469 cmd.phy_id = htole32(phyctxt->id);
7470 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7471 IWX_PHY_RX_CHAIN_VALID_POS);
7472 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7473 cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7474 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7475
7476 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7477 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7478 }
7479
/*
 * Move a PHY context to a new channel/width configuration.
 *
 * With CDB-capable firmware a context cannot be modified across a band
 * change (2 GHz <-> 5 GHz); in that case the context is removed and
 * re-added instead of modified.  On success the context's channel, sco
 * and vht_chan_width fields are updated, and newer firmware (RLC command
 * version 2) additionally gets an RLC configuration command.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	/* CDB firmware cannot MODIFY a context across a band change. */
	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/* Newer firmware wants a separate RLC command after the update. */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7544
/*
 * Bring up firmware state for authentication: set up the PHY context,
 * add the MAC context, binding and station, enable Tx queues, clear
 * statistics and finally protect the session so the firmware stays on
 * channel while association completes.
 *
 * On failure, resources acquired so far are torn down through the goto
 * ladder at the end — but only if no firmware reset (sc_generation bump)
 * happened in the meantime.
 *
 * NOTE(review): ieee80211_ref_node() is taken on vap->iv_bss but no
 * matching release is visible on the early-return or error paths here —
 * confirm whether the reference is intentionally retained by 'in'.
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	/* Point PHY context 0 at the BSS/monitor channel. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode only needs the injection queue; done after that. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

rm_mgmt_queue:
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7649
/*
 * Tear down firmware state after deauthentication: cancel session
 * protection, then remove the station, binding and MAC context in
 * reverse order of iwx_auth(), guarded by the corresponding sc_flags
 * bits so partially-initialized state is handled.  Finally parks the
 * unused PHY context on a default channel.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7701
/*
 * Complete the transition to RUN state after association: apply HT/VHT
 * PHY settings, refresh the station and MAC context (which now carries
 * the associd), enable the smart FIFO, multicast filtering, power
 * management, and finally rate-scaling.
 *
 * Called with the softc lock held.  Returns 0 on success or an errno.
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Use both chains when MIMO is enabled, one otherwise. */
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		/* 80 MHz only for VHT nodes on a VHT80 channel. */
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Switch the smart FIFO to full-on now that we are associated. */
	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode needs no rate-scaling state. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7798
/*
 * Undo iwx_run() when leaving RUN state: flush pending Tx for the
 * station, tear down active Rx block-ack sessions, switch the smart
 * FIFO back to its init setting and mark the station disassociated in
 * the MAC context.
 *
 * Called with the softc lock held.  Returns 0 on success or an errno.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		/* Skip slots which never held a BA session. */
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7854
7855 static struct ieee80211_node *
iwx_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])7856 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7857 {
7858 return malloc(sizeof (struct iwx_node), M_80211_NODE,
7859 M_NOWAIT | M_ZERO);
7860 }
7861
#if 0
/*
 * Hardware crypto offload (CCMP key installation/removal) inherited from
 * the OpenBSD driver.  Disabled in this port: it depends on OpenBSD
 * net80211 interfaces (ieee80211_set_key(), splnet(), systq tasks,
 * refcnt_rele_wake()) which have no direct FreeBSD equivalent here.
 * Kept for reference until key offload is ported.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	/* Queue the key for installation from task context. */
	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	return EBUSY;
}

int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		/* Key install failed; force a deauth and rescan. */
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* Open the port once both pairwise and group keys are installed. */
	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}

void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	/* Drain the queued keys; stop on first error or on shutdown. */
	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}

void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* IWX_STA_KEY_NOT_VALID tells the firmware to drop this key. */
	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
#endif
8014
/*
 * Core of the state-machine transition, run without the net80211 lock
 * (see iwx_newstate()).  For downward transitions (or anything past RUN)
 * the current state is torn down first, cascading through the switch via
 * deliberate fallthroughs: RUN undoes its own state, then AUTH/ASSOC
 * deauthenticate if we are dropping to AUTH or below.  Afterwards the
 * action for the new state is performed.
 *
 * Returns 0 on success or an errno from the firmware commands.
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			/* AUTH -> AUTH passes through here as well. */
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/* Now take the action required to enter the new state. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8080
8081 static int
iwx_newstate(struct ieee80211vap * vap,enum ieee80211_state nstate,int arg)8082 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
8083 {
8084 struct iwx_vap *ivp = IWX_VAP(vap);
8085 struct ieee80211com *ic = vap->iv_ic;
8086 enum ieee80211_state ostate = vap->iv_state;
8087 int err;
8088
8089 /*
8090 * Prevent attempts to transition towards the same state, unless
8091 * we are scanning in which case a SCAN -> SCAN transition
8092 * triggers another scan iteration. And AUTH -> AUTH is needed
8093 * to support band-steering.
8094 */
8095 if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
8096 nstate != IEEE80211_S_AUTH)
8097 return 0;
8098 IEEE80211_UNLOCK(ic);
8099 err = iwx_newstate_sub(vap, nstate);
8100 IEEE80211_LOCK(ic);
8101 if (err == 0)
8102 err = ivp->iv_newstate(vap, nstate, arg);
8103
8104 return (err);
8105 }
8106
8107 static void
iwx_endscan(struct iwx_softc * sc)8108 iwx_endscan(struct iwx_softc *sc)
8109 {
8110 struct ieee80211com *ic = &sc->sc_ic;
8111 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8112
8113 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8114 return;
8115
8116 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8117
8118 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8119 wakeup(&vap->iv_state); /* wake up iwx_newstate */
8120 }
8121
8122 /*
8123 * Aging and idle timeouts for the different possible scenarios
8124 * in default configuration
8125 */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row is { aging timer, idle timer } for one traffic scenario. */
	{	/* single unicast */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-attempt */
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
8149
8150 /*
8151 * Aging and idle timeouts for the different possible scenarios
8152 * in single BSS MAC configuration.
8153 */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row is { aging timer, idle timer } for one traffic scenario. */
	{	/* single unicast */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-attempt */
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};
8177
8178 static void
iwx_fill_sf_command(struct iwx_softc * sc,struct iwx_sf_cfg_cmd * sf_cmd,struct ieee80211_node * ni)8179 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8180 struct ieee80211_node *ni)
8181 {
8182 int i, j, watermark;
8183
8184 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8185
8186 /*
8187 * If we are in association flow - check antenna configuration
8188 * capabilities of the AP station, and choose the watermark accordingly.
8189 */
8190 if (ni) {
8191 if (ni->ni_flags & IEEE80211_NODE_HT) {
8192 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8193 int hasmimo = 0;
8194 for (i = 0; i < htrs->rs_nrates; i++) {
8195 if (htrs->rs_rates[i] > 7) {
8196 hasmimo = 1;
8197 break;
8198 }
8199 }
8200 if (hasmimo)
8201 watermark = IWX_SF_W_MARK_MIMO2;
8202 else
8203 watermark = IWX_SF_W_MARK_SISO;
8204 } else {
8205 watermark = IWX_SF_W_MARK_LEGACY;
8206 }
8207 /* default watermark value for unassociated mode. */
8208 } else {
8209 watermark = IWX_SF_W_MARK_MIMO2;
8210 }
8211 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8212
8213 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8214 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8215 sf_cmd->long_delay_timeouts[i][j] =
8216 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8217 }
8218 }
8219
8220 if (ni) {
8221 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8222 sizeof(iwx_sf_full_timeout));
8223 } else {
8224 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8225 sizeof(iwx_sf_full_timeout_def));
8226 }
8227
8228 }
8229
8230 static int
iwx_sf_config(struct iwx_softc * sc,int new_state)8231 iwx_sf_config(struct iwx_softc *sc, int new_state)
8232 {
8233 struct ieee80211com *ic = &sc->sc_ic;
8234 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8235 struct ieee80211_node *ni = vap->iv_bss;
8236 struct iwx_sf_cfg_cmd sf_cmd = {
8237 .state = htole32(new_state),
8238 };
8239 int err = 0;
8240
8241 switch (new_state) {
8242 case IWX_SF_UNINIT:
8243 case IWX_SF_INIT_OFF:
8244 iwx_fill_sf_command(sc, &sf_cmd, NULL);
8245 break;
8246 case IWX_SF_FULL_ON:
8247 iwx_fill_sf_command(sc, &sf_cmd, ni);
8248 break;
8249 default:
8250 return EINVAL;
8251 }
8252
8253 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8254 sizeof(sf_cmd), &sf_cmd);
8255 return err;
8256 }
8257
8258 static int
iwx_send_bt_init_conf(struct iwx_softc * sc)8259 iwx_send_bt_init_conf(struct iwx_softc *sc)
8260 {
8261 struct iwx_bt_coex_cmd bt_cmd;
8262
8263 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8264
8265 bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8266 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8267 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8268
8269
8270 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8271 &bt_cmd);
8272 }
8273
/*
 * Report platform (SoC) properties to the firmware: whether the PHY is
 * discrete or integrated, the LTR delay to apply, and crystal latency.
 * Returns 0 on success or an errno.
 */
static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask. Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		/* Integrated parts may need an LTR delay applied. */
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/* Low-latency crystal flag requires UMAC scan cmd v2+. */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
8312
8313 static int
iwx_send_update_mcc_cmd(struct iwx_softc * sc,const char * alpha2)8314 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8315 {
8316 struct iwx_mcc_update_cmd mcc_cmd;
8317 struct iwx_host_cmd hcmd = {
8318 .id = IWX_MCC_UPDATE_CMD,
8319 .flags = IWX_CMD_WANT_RESP,
8320 .data = { &mcc_cmd },
8321 };
8322 struct iwx_rx_packet *pkt;
8323 struct iwx_mcc_update_resp *resp;
8324 size_t resp_len;
8325 int err;
8326
8327 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8328 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8329 if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8330 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8331 mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8332 else
8333 mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8334
8335 hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8336 hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8337
8338 err = iwx_send_cmd(sc, &hcmd);
8339 if (err)
8340 return err;
8341
8342 pkt = hcmd.resp_pkt;
8343 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8344 err = EIO;
8345 goto out;
8346 }
8347
8348 resp_len = iwx_rx_packet_payload_len(pkt);
8349 if (resp_len < sizeof(*resp)) {
8350 err = EIO;
8351 goto out;
8352 }
8353
8354 resp = (void *)pkt->data;
8355 if (resp_len != sizeof(*resp) +
8356 resp->n_channels * sizeof(resp->channels[0])) {
8357 err = EIO;
8358 goto out;
8359 }
8360
8361 DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
8362 resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8363
8364 out:
8365 iwx_free_resp(sc, &hcmd);
8366
8367 return err;
8368 }
8369
8370 static int
iwx_send_temp_report_ths_cmd(struct iwx_softc * sc)8371 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8372 {
8373 struct iwx_temp_report_ths_cmd cmd;
8374 int err;
8375
8376 /*
8377 * In order to give responsibility for critical-temperature-kill
8378 * and TX backoff to FW we need to send an empty temperature
8379 * reporting command at init time.
8380 */
8381 memset(&cmd, 0, sizeof(cmd));
8382
8383 err = iwx_send_cmd_pdu(sc,
8384 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8385 0, sizeof(cmd), &cmd);
8386 if (err)
8387 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8388 DEVNAME(sc), err);
8389
8390 return err;
8391 }
8392
8393 static int
iwx_init_hw(struct iwx_softc * sc)8394 iwx_init_hw(struct iwx_softc *sc)
8395 {
8396 struct ieee80211com *ic = &sc->sc_ic;
8397 int err = 0, i;
8398
8399 err = iwx_run_init_mvm_ucode(sc, 0);
8400 if (err)
8401 return err;
8402
8403 if (!iwx_nic_lock(sc))
8404 return EBUSY;
8405
8406 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8407 if (err) {
8408 printf("%s: could not init tx ant config (error %d)\n",
8409 DEVNAME(sc), err);
8410 goto err;
8411 }
8412
8413 if (sc->sc_tx_with_siso_diversity) {
8414 err = iwx_send_phy_cfg_cmd(sc);
8415 if (err) {
8416 printf("%s: could not send phy config (error %d)\n",
8417 DEVNAME(sc), err);
8418 goto err;
8419 }
8420 }
8421
8422 err = iwx_send_bt_init_conf(sc);
8423 if (err) {
8424 printf("%s: could not init bt coex (error %d)\n",
8425 DEVNAME(sc), err);
8426 return err;
8427 }
8428
8429 err = iwx_send_soc_conf(sc);
8430 if (err) {
8431 printf("%s: iwx_send_soc_conf failed\n", __func__);
8432 return err;
8433 }
8434
8435 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8436 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8437 err = iwx_send_dqa_cmd(sc);
8438 if (err) {
8439 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8440 "failed (error %d)\n", __func__, err);
8441 return err;
8442 }
8443 }
8444 // TODO phyctxt
8445 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8446 /*
8447 * The channel used here isn't relevant as it's
8448 * going to be overwritten in the other flows.
8449 * For now use the first channel we have.
8450 */
8451 sc->sc_phyctxt[i].id = i;
8452 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8453 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8454 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8455 if (err) {
8456 printf("%s: could not add phy context %d (error %d)\n",
8457 DEVNAME(sc), i, err);
8458 goto err;
8459 }
8460 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8461 IWX_RLC_CONFIG_CMD) == 2) {
8462 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8463 if (err) {
8464 printf("%s: could not configure RLC for PHY "
8465 "%d (error %d)\n", DEVNAME(sc), i, err);
8466 goto err;
8467 }
8468 }
8469 }
8470
8471 err = iwx_config_ltr(sc);
8472 if (err) {
8473 printf("%s: PCIe LTR configuration failed (error %d)\n",
8474 DEVNAME(sc), err);
8475 }
8476
8477 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8478 err = iwx_send_temp_report_ths_cmd(sc);
8479 if (err) {
8480 printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8481 __func__);
8482 goto err;
8483 }
8484 }
8485
8486 err = iwx_power_update_device(sc);
8487 if (err) {
8488 printf("%s: could not send power command (error %d)\n",
8489 DEVNAME(sc), err);
8490 goto err;
8491 }
8492
8493 if (sc->sc_nvm.lar_enabled) {
8494 err = iwx_send_update_mcc_cmd(sc, "ZZ");
8495 if (err) {
8496 printf("%s: could not init LAR (error %d)\n",
8497 DEVNAME(sc), err);
8498 goto err;
8499 }
8500 }
8501
8502 err = iwx_config_umac_scan_reduced(sc);
8503 if (err) {
8504 printf("%s: could not configure scan (error %d)\n",
8505 DEVNAME(sc), err);
8506 goto err;
8507 }
8508
8509 err = iwx_disable_beacon_filter(sc);
8510 if (err) {
8511 printf("%s: could not disable beacon filter (error %d)\n",
8512 DEVNAME(sc), err);
8513 goto err;
8514 }
8515
8516 err:
8517 iwx_nic_unlock(sc);
8518 return err;
8519 }
8520
8521 /* Allow multicast from our BSSID. */
8522 static int
iwx_allow_mcast(struct iwx_softc * sc)8523 iwx_allow_mcast(struct iwx_softc *sc)
8524 {
8525 struct ieee80211com *ic = &sc->sc_ic;
8526 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8527 struct iwx_node *in = IWX_NODE(vap->iv_bss);
8528 struct iwx_mcast_filter_cmd *cmd;
8529 size_t size;
8530 int err;
8531
8532 size = roundup(sizeof(*cmd), 4);
8533 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8534 if (cmd == NULL)
8535 return ENOMEM;
8536 cmd->filter_own = 1;
8537 cmd->port_id = 0;
8538 cmd->count = 0;
8539 cmd->pass_all = 1;
8540 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8541
8542 err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8543 0, size, cmd);
8544 free(cmd, M_DEVBUF);
8545 return err;
8546 }
8547
/*
 * Bring the interface up: start the hardware, run firmware init, and
 * arm the watchdog.  Returns 0 on success or an errno.
 */
static int
iwx_init(struct iwx_softc *sc)
{
	int err, generation;
	/* Bump the generation so concurrent teardown can detect a restart. */
	generation = ++sc->sc_generation;
	/* NOTE(review): iwx_preinit()'s return value is ignored — confirm. */
	iwx_preinit(sc);

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: iwx_start_hw failed\n", __func__);
		return err;
	}

	err = iwx_init_hw(sc);
	if (err) {
		/* Only stop the device if nobody restarted it meanwhile. */
		if (generation == sc->sc_generation)
			iwx_stop_device(sc);
		printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
		return err;
	}

	sc->sc_flags |= IWX_FLAG_HW_INITED;
	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);

	return 0;
}
8574
8575 static void
iwx_start(struct iwx_softc * sc)8576 iwx_start(struct iwx_softc *sc)
8577 {
8578 struct ieee80211_node *ni;
8579 struct mbuf *m;
8580
8581 IWX_ASSERT_LOCKED(sc);
8582
8583 while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8584 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8585 if (iwx_tx(sc, m, ni) != 0) {
8586 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8587 continue;
8588 }
8589 }
8590 }
8591
8592 static void
iwx_stop(struct iwx_softc * sc)8593 iwx_stop(struct iwx_softc *sc)
8594 {
8595 struct ieee80211com *ic = &sc->sc_ic;
8596 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8597 struct iwx_vap *ivp = IWX_VAP(vap);
8598
8599 iwx_stop_device(sc);
8600
8601 /* Reset soft state. */
8602 sc->sc_generation++;
8603 ivp->phy_ctxt = NULL;
8604
8605 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8606 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8607 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8608 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8609 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8610 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8611 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8612 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8613
8614 sc->sc_rx_ba_sessions = 0;
8615 sc->ba_rx.start_tidmask = 0;
8616 sc->ba_rx.stop_tidmask = 0;
8617 memset(sc->aggqid, 0, sizeof(sc->aggqid));
8618 sc->ba_tx.start_tidmask = 0;
8619 sc->ba_tx.stop_tidmask = 0;
8620 }
8621
8622 static void
iwx_watchdog(void * arg)8623 iwx_watchdog(void *arg)
8624 {
8625 struct iwx_softc *sc = arg;
8626 struct ieee80211com *ic = &sc->sc_ic;
8627 int i;
8628
8629 /*
8630 * We maintain a separate timer for each Tx queue because
8631 * Tx aggregation queues can get "stuck" while other queues
8632 * keep working. The Linux driver uses a similar workaround.
8633 */
8634 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8635 if (sc->sc_tx_timer[i] > 0) {
8636 if (--sc->sc_tx_timer[i] == 0) {
8637 printf("%s: device timeout\n", DEVNAME(sc));
8638
8639 iwx_nic_error(sc);
8640 iwx_dump_driver_status(sc);
8641 ieee80211_restart_all(ic);
8642 return;
8643 }
8644 }
8645 }
8646 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8647 }
8648
/*
 * LMAC error log, dumped after a firmware crash.
 *
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwx_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8702
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Fetched from device memory by iwx_nic_umac_error() below.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwx_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC version, major part */
	uint32_t umac_minor;	/* UMAC version, minor part */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
8727
/* Offset of the first log element: the leading 'valid' word. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
/* Size of one logged error element, in bytes. */
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))

/*
 * Fetch and print the UMAC error event table after a firmware error.
 * The table address was reported by the ucode at boot
 * (sc_uc.uc_umac_error_event_table).
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* UMAC memory lives above 0x400000; anything lower is bogus. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() takes a word count, hence the division. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	/*
	 * NOTE(review): banner is printed whenever the table claims at
	 * least one element; presumably inherited as-is from the OpenBSD
	 * driver — confirm the intended semantics of 'valid' here.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8775
/* High nibble of a firmware assert code identifies the asserting CPU. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Firmware assert code -> human-readable name mapping, searched by
 * iwx_desc_lookup() with the CPU bits masked off.  The final entry
 * ("ADVANCED_SYSASSERT") is the catch-all default and must stay last.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8802
8803 static const char *
iwx_desc_lookup(uint32_t num)8804 iwx_desc_lookup(uint32_t num)
8805 {
8806 int i;
8807
8808 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8809 if (advanced_lookup[i].num ==
8810 (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8811 return advanced_lookup[i].name;
8812
8813 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8814 return advanced_lookup[i].name;
8815 }
8816
8817 /*
8818 * Support for dumping the error log seemed like a good idea ...
8819 * but it's mostly hex junk and the only sensible thing is the
8820 * hw/ucode revision (which we know anyway). Since it's here,
8821 * I'll just leave it in, just in case e.g. the Intel guys want to
8822 * help us decipher some "ADVANCED_SYSASSERT" later.
8823 */
8824 static void
iwx_nic_error(struct iwx_softc * sc)8825 iwx_nic_error(struct iwx_softc *sc)
8826 {
8827 struct iwx_error_event_table table;
8828 uint32_t base;
8829
8830 printf("%s: dumping device error log\n", DEVNAME(sc));
8831 printf("%s: GOS-3758: 1\n", __func__);
8832 base = sc->sc_uc.uc_lmac_error_event_table[0];
8833 printf("%s: GOS-3758: 2\n", __func__);
8834 if (base < 0x400000) {
8835 printf("%s: Invalid error log pointer 0x%08x\n",
8836 DEVNAME(sc), base);
8837 return;
8838 }
8839
8840 printf("%s: GOS-3758: 3\n", __func__);
8841 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8842 printf("%s: reading errlog failed\n", DEVNAME(sc));
8843 return;
8844 }
8845
8846 printf("%s: GOS-3758: 4\n", __func__);
8847 if (!table.valid) {
8848 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8849 return;
8850 }
8851
8852 printf("%s: GOS-3758: 5\n", __func__);
8853 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8854 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8855 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8856 sc->sc_flags, table.valid);
8857 }
8858
8859 printf("%s: GOS-3758: 6\n", __func__);
8860 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8861 iwx_desc_lookup(table.error_id));
8862 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8863 table.trm_hw_status0);
8864 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8865 table.trm_hw_status1);
8866 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8867 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8868 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8869 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8870 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8871 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8872 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8873 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8874 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8875 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8876 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8877 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8878 table.fw_rev_type);
8879 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8880 table.major);
8881 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8882 table.minor);
8883 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8884 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8885 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8886 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8887 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8888 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8889 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8890 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8891 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8892 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8893 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8894 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8895 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8896 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8897 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8898 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8899 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8900
8901 if (sc->sc_uc.uc_umac_error_event_table)
8902 iwx_nic_umac_error(sc);
8903 }
8904
8905 static void
iwx_dump_driver_status(struct iwx_softc * sc)8906 iwx_dump_driver_status(struct iwx_softc *sc)
8907 {
8908 struct ieee80211com *ic = &sc->sc_ic;
8909 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8910 enum ieee80211_state state = vap->iv_state;
8911 int i;
8912
8913 printf("driver status:\n");
8914 for (i = 0; i < nitems(sc->txq); i++) {
8915 struct iwx_tx_ring *ring = &sc->txq[i];
8916 printf(" tx ring %2d: qid=%-2d cur=%-3d "
8917 "cur_hw=%-3d queued=%-3d\n",
8918 i, ring->qid, ring->cur, ring->cur_hw,
8919 ring->queued);
8920 }
8921 printf(" rx ring: cur=%d\n", sc->rxq.cur);
8922 printf(" 802.11 state %s\n", ieee80211_state_name[state]);
8923 }
8924
/*
 * Sync the RX DMA map for CPU reads and point _var_ at the payload that
 * immediately follows the packet header.  NOTE: this macro implicitly
 * uses 'sc' and 'data' from the enclosing scope (iwx_rx_pkt()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);	\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
8930
8931 static int
iwx_rx_pkt_valid(struct iwx_rx_packet * pkt)8932 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8933 {
8934 int qid, idx, code;
8935
8936 qid = pkt->hdr.qid & ~0x80;
8937 idx = pkt->hdr.idx;
8938 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8939
8940 return (!(qid == 0 && idx == 0 && code == 0) &&
8941 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8942 }
8943
/*
 * Process all firmware packets contained in one RX buffer.
 *
 * Pre-AX210 devices may concatenate several packets (command responses,
 * notifications, received frames) in a single buffer; walk them in order
 * and dispatch each by its wide (group, opcode) code.  MPDU packets are
 * handed up to net80211 (the last one consumes m0 itself, earlier ones
 * via m_copym()); everything else is handled in place.
 *
 * NOTE(review): the 'ml' argument is never used here; presumably a
 * leftover from the OpenBSD mbuf-list interface — confirm before removal.
 */
static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	m0 = data->m;
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;
		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		if (!iwx_rx_pkt_valid(pkt))
			break;

		/*
		 * XXX Intel inside (tm)
		 * Any commands in the LONG_GROUP could actually be in the
		 * LEGACY group. Firmware API versions >= 50 reject commands
		 * in group 0, forcing us to use this hack.
		 */
		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
			struct iwx_tx_ring *ring = &sc->txq[qid];
			struct iwx_tx_data *txdata = &ring->data[idx];
			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
				code = iwx_cmd_opcode(code);
		}

		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		/* Reject truncated or out-of-bounds packet lengths. */
		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
			break;

		// TODO ???
		/*
		 * On the first MPDU in this buffer, replace the ring slot's
		 * mbuf so the stack can take ownership of m0 below.
		 */
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				break;
			}
			KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			/* XXX-THJ: I've not managed to hit this path in testing */
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			/* AX210 devices ship only one packet per Rx buffer. */
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
			    nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
				if (m == NULL) {
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
			}
			break;
		}

//		case IWX_BAR_FRAME_RELEASE:
//			iwx_rx_bar_frame_release(sc, pkt, ml);
//			break;
//
		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_BA_NOTIF:
			iwx_rx_compressed_ba(sc, pkt);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
			    "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
			    __func__);
			iwx_rx_bmiss(sc, pkt, data);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;
			struct iwx_alive_resp_v5 *resp5;
			struct iwx_alive_resp_v6 *resp6;

			DPRINTF(("%s: firmware alive\n", __func__));
			sc->sc_uc.uc_ok = 0;

			/*
			 * For v5 and above, we can check the version, for older
			 * versions we need to check the size.
			 */
			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 6) {
				SYNC_RESP_STRUCT(resp6, pkt);
				/* Size mismatch: wake the waiter without uc_ok. */
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp6)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp6->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp6->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp6->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp6->sku_id.data[2]);
				if (resp6->status == IWX_ALIVE_STATUS_OK) {
					sc->sc_uc.uc_ok = 1;
				}
			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 5) {
				SYNC_RESP_STRUCT(resp5, pkt);
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp5)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp5->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp5->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp5->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp5->sku_id.data[2]);
				if (resp5->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else
				printf("unknown payload version");

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			ieee80211_restart_all(ic);
			break;
		}

		/*
		 * Plain command responses: if a synchronous command is
		 * waiting, copy the full packet into its response buffer.
		 */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_CMD):
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Drop failed or mis-sized responses. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
			struct iwx_umac_scan_complete *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
			    notif->status));
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
			    __func__));
			struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
			    notif->status));
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Only react to the time event we actually scheduled. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_NOTIF): {
			struct iwx_session_prot_notif *notif;
			uint32_t status, start, conf_id;

			SYNC_RESP_STRUCT(notif, pkt);

			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			(void)notif;
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	/* Free m0 unless the stack took it or it still belongs to the ring. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
9381
/*
 * Drain the RX ring: process every buffer between our software index
 * (sc->rxq.cur) and the hardware's closed index, then tell the firmware
 * how far we got.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
	/*
	 * NOTE(review): 'm' is passed to iwx_rx_pkt() but never initialized
	 * or dereferenced there (the parameter is unused); presumably a
	 * leftover of the OpenBSD mbuf-list API — confirm before removal.
	 */
	struct mbuf m;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* AX210+ report the closed index in a bare 16-bit status word. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9414
#if 0
/*
 * Legacy INTx/ICT interrupt handler, currently compiled out; the driver
 * uses iwx_intr_msix() instead.  Kept for reference.
 */
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
#endif
9543
/*
 * MSI-X interrupt handler (vector 0).
 *
 * Reads and acknowledges the FH and HW cause registers, dispatches RX
 * notifications, firmware-load completions, RF-kill, firmware and
 * hardware error conditions, then re-enables the vector via the W1C
 * automask register.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	/* Read causes, ack them back, and mask to the bits we enabled. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
 out:
	IWX_UNLOCK(sc);
	return;
}
9625
9626 /*
9627 * The device info table below contains device-specific config overrides.
9628 * The most important parameter derived from this table is the name of the
9629 * firmware image to load.
9630 *
9631 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9632 * The "old" table matches devices based on PCI vendor/product IDs only.
9633 * The "new" table extends this with various device parameters derived
9634 * from MAC type, and RF type.
9635 *
9636 * In iwlwifi "old" and "new" tables share the same array, where "old"
9637 * entries contain dummy values for data defined only for "new" entries.
9638 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9641 * entries from "old" to "new" have at times been reverted due to regressions.
9642 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9643 * devices in the same driver.
9644 *
9645 * Our table below contains mostly "new" entries declared in iwlwifi
9646 * with the _IWL_DEV_INFO() macro (with a leading underscore).
9647 * Other devices are matched based on PCI vendor/product ID as usual,
9648 * unless matching specific PCI subsystem vendor/product IDs is required.
9649 *
9650 * Some "old"-style entries are required to identify the firmware image to use.
9651 * Others might be used to print a specific marketing name into Linux dmesg,
9652 * but we can't be sure whether the corresponding devices would be matched
9653 * correctly in the absence of their entries. So we include them just in case.
9654 */
9655
/*
 * One device-match entry; fields other than device/subdevice may hold
 * IWX_CFG_ANY wildcards (see the IWX_DEV_INFO macro below).
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID */
	uint16_t subdevice;	/* PCI subsystem device ID */
	uint16_t mac_type;
	uint16_t rf_type;
	uint8_t mac_step;
	uint8_t rf_id;
	uint8_t no_160;		/* 160 MHz channels unsupported */
	uint8_t cores;
	uint8_t cdb;
	uint8_t jacket;
	const struct iwx_device_cfg *cfg;	/* config/firmware selection */
};
9669
/*
 * Full-match entry constructor (mirrors iwlwifi's _IWL_DEV_INFO):
 * matches on PCI IDs plus MAC/RF type and stepping.
 */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type, \
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/* "Old"-style entry: match on PCI vendor/product IDs only. */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	\
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9681
9682 /*
9683 * When adding entries to this table keep in mind that entries must
9684 * be listed in the same order as in the Linux driver. Code walks this
9685 * table backwards and uses the first matching entry it finds.
9686 * Device firmware must be available in fw_update(8).
9687 */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	/*
	 * NOTE(review): the "9461"/"9461_160" comments below sit on JF2
	 * entries, while the Qu/So/SoF sections in this table consistently
	 * pair JF2 with "9560" and JF1 with "9461" — verify these marketing
	 * names against the Linux iwlwifi table before relying on them.
	 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};
9986
9987 static int
iwx_preinit(struct iwx_softc * sc)9988 iwx_preinit(struct iwx_softc *sc)
9989 {
9990 struct ieee80211com *ic = &sc->sc_ic;
9991 int err;
9992
9993 err = iwx_prepare_card_hw(sc);
9994 if (err) {
9995 printf("%s: could not initialize hardware\n", DEVNAME(sc));
9996 return err;
9997 }
9998
9999 if (sc->attached) {
10000 return 0;
10001 }
10002
10003 err = iwx_start_hw(sc);
10004 if (err) {
10005 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10006 return err;
10007 }
10008
10009 err = iwx_run_init_mvm_ucode(sc, 1);
10010 iwx_stop_device(sc);
10011 if (err) {
10012 printf("%s: failed to stop device\n", DEVNAME(sc));
10013 return err;
10014 }
10015
10016 /* Print version info and MAC address on first successful fw load. */
10017 sc->attached = 1;
10018 if (sc->sc_pnvm_ver) {
10019 printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10020 "address %s\n",
10021 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10022 sc->sc_fwver, sc->sc_pnvm_ver,
10023 ether_sprintf(sc->sc_nvm.hw_addr));
10024 } else {
10025 printf("%s: hw rev 0x%x, fw %s, address %s\n",
10026 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10027 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10028 }
10029
10030 /* not all hardware can do 5GHz band */
10031 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10032 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10033 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10034
10035 return 0;
10036 }
10037
10038 static void
iwx_attach_hook(void * self)10039 iwx_attach_hook(void *self)
10040 {
10041 struct iwx_softc *sc = (void *)self;
10042 struct ieee80211com *ic = &sc->sc_ic;
10043 int err;
10044
10045 IWX_LOCK(sc);
10046 err = iwx_preinit(sc);
10047 IWX_UNLOCK(sc);
10048 if (err != 0)
10049 goto out;
10050
10051 iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
10052 ic->ic_channels);
10053
10054 ieee80211_ifattach(ic);
10055 ic->ic_vap_create = iwx_vap_create;
10056 ic->ic_vap_delete = iwx_vap_delete;
10057 ic->ic_raw_xmit = iwx_raw_xmit;
10058 ic->ic_node_alloc = iwx_node_alloc;
10059 ic->ic_scan_start = iwx_scan_start;
10060 ic->ic_scan_end = iwx_scan_end;
10061 ic->ic_update_mcast = iwx_update_mcast;
10062 ic->ic_getradiocaps = iwx_init_channel_map;
10063
10064 ic->ic_set_channel = iwx_set_channel;
10065 ic->ic_scan_curchan = iwx_scan_curchan;
10066 ic->ic_scan_mindwell = iwx_scan_mindwell;
10067 ic->ic_wme.wme_update = iwx_wme_update;
10068 ic->ic_parent = iwx_parent;
10069 ic->ic_transmit = iwx_transmit;
10070
10071 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
10072 ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
10073 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
10074 ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
10075
10076 sc->sc_addba_request = ic->ic_addba_request;
10077 ic->ic_addba_request = iwx_addba_request;
10078 sc->sc_addba_response = ic->ic_addba_response;
10079 ic->ic_addba_response = iwx_addba_response;
10080
10081 iwx_radiotap_attach(sc);
10082 ieee80211_announce(ic);
10083 out:
10084 config_intrhook_disestablish(&sc->sc_preinit_hook);
10085 }
10086
/*
 * Select the device configuration matching the probed hardware.
 *
 * Decodes the PCI subsystem device ID and the CSR hardware revision /
 * RF ID registers into the match criteria, then walks
 * iwx_dev_info_table[] BACKWARDS, returning the cfg of the first entry
 * whose every non-wildcard field matches. Walking backwards means the
 * most specific entries must appear last in the table (it mirrors the
 * Linux iwlwifi match order — see the comment above the table).
 *
 * Returns the matching cfg, or NULL if no table entry matches.
 */
const struct iwx_device_cfg *
iwx_find_device_cfg(struct iwx_softc *sc)
{
	uint16_t sdev_id, mac_type, rf_type;
	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
	int i;

	/* Criteria derived from PCI config space and CSR registers. */
	sdev_id = pci_get_subdevice(sc->sc_dev);
	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
	/* The <<2 shift matches how sc_hw_rev's step was stored in attach. */
	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);

	/* Criteria encoded in the PCI subsystem device ID. */
	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
	cores = IWX_SUBDEVICE_CORES(sdev_id);

	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];

		/*
		 * For each field: IWX_CFG_ANY (cast to the field's width,
		 * since the table stores truncated values) is a wildcard;
		 * otherwise the field must match exactly.
		 */
		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
		    dev_info->device != sc->sc_pid)
			continue;

		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
		    dev_info->subdevice != sdev_id)
			continue;

		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
		    dev_info->mac_type != mac_type)
			continue;

		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
		    dev_info->mac_step != mac_step)
			continue;

		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
		    dev_info->rf_type != rf_type)
			continue;

		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
		    dev_info->cdb != cdb)
			continue;

		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
		    dev_info->jacket != jacket)
			continue;

		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
		    dev_info->rf_id != rf_id)
			continue;

		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
		    dev_info->no_160 != no_160)
			continue;

		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
		    dev_info->cores != cores)
			continue;

		return dev_info->cfg;
	}

	return NULL;
}
10153
10154 static int
iwx_probe(device_t dev)10155 iwx_probe(device_t dev)
10156 {
10157 int i;
10158
10159 for (i = 0; i < nitems(iwx_devices); i++) {
10160 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10161 pci_get_device(dev) == iwx_devices[i].device) {
10162 device_set_desc(dev, iwx_devices[i].name);
10163
10164 /*
10165 * Due to significant existing deployments using
10166 * iwlwifi lower the priority of iwx.
10167 *
10168 * This inverts the advice in bus.h where drivers
10169 * supporting newer hardware should return
10170 * BUS_PROBE_DEFAULT and drivers for older devices
10171 * return BUS_PROBE_LOW_PRIORITY.
10172 *
10173 */
10174 return (BUS_PROBE_LOW_PRIORITY);
10175 }
10176 }
10177
10178 return (ENXIO);
10179 }
10180
10181 static int
iwx_attach(device_t dev)10182 iwx_attach(device_t dev)
10183 {
10184 struct iwx_softc *sc = device_get_softc(dev);
10185 struct ieee80211com *ic = &sc->sc_ic;
10186 const struct iwx_device_cfg *cfg;
10187 int err;
10188 int txq_i, i, j;
10189 size_t ctxt_info_size;
10190 int rid;
10191 int count;
10192 int error;
10193 sc->sc_dev = dev;
10194 sc->sc_pid = pci_get_device(dev);
10195 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
10196
10197 TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
10198 IWX_LOCK_INIT(sc);
10199 mbufq_init(&sc->sc_snd, ifqmaxlen);
10200 TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
10201 TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
10202 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
10203 taskqueue_thread_enqueue, &sc->sc_tq);
10204 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
10205 if (error != 0) {
10206 device_printf(dev, "can't start taskq thread, error %d\n",
10207 error);
10208 return (ENXIO);
10209 }
10210
10211 pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
10212 if (sc->sc_cap_off == 0) {
10213 device_printf(dev, "PCIe capability structure not found!\n");
10214 return (ENXIO);
10215 }
10216
10217 /*
10218 * We disable the RETRY_TIMEOUT register (0x41) to keep
10219 * PCI Tx retries from interfering with C3 CPU state.
10220 */
10221 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10222
10223 if (pci_msix_count(dev)) {
10224 sc->sc_msix = 1;
10225 } else {
10226 device_printf(dev, "no MSI-X found\n");
10227 return (ENXIO);
10228 }
10229
10230 pci_enable_busmaster(dev);
10231 rid = PCIR_BAR(0);
10232 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
10233 RF_ACTIVE);
10234 if (sc->sc_mem == NULL) {
10235 device_printf(sc->sc_dev, "can't map mem space\n");
10236 return (ENXIO);
10237 }
10238 sc->sc_st = rman_get_bustag(sc->sc_mem);
10239 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
10240
10241 count = 1;
10242 rid = 0;
10243 if (pci_alloc_msix(dev, &count) == 0)
10244 rid = 1;
10245 DPRINTF(("%s: count=%d\n", __func__, count));
10246 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
10247 (rid != 0 ? 0 : RF_SHAREABLE));
10248 if (sc->sc_irq == NULL) {
10249 device_printf(dev, "can't map interrupt\n");
10250 return (ENXIO);
10251 }
10252 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
10253 NULL, iwx_intr_msix, sc, &sc->sc_ih);
10254 if (error != 0) {
10255 device_printf(dev, "can't establish interrupt\n");
10256 return (ENXIO);
10257 }
10258
10259 /* Clear pending interrupts. */
10260 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10261 IWX_WRITE(sc, IWX_CSR_INT, ~0);
10262 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
10263
10264 sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
10265 DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
10266 sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
10267 DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));
10268
10269 /*
10270 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
10271 * changed, and now the revision step also includes bit 0-1 (no more
10272 * "dash" value). To keep hw_rev backwards compatible - we'll store it
10273 * in the old format.
10274 */
10275 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10276 (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10277
10278 switch (sc->sc_pid) {
10279 case PCI_PRODUCT_INTEL_WL_22500_1:
10280 sc->sc_fwname = IWX_CC_A_FW;
10281 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10282 sc->sc_integrated = 0;
10283 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10284 sc->sc_low_latency_xtal = 0;
10285 sc->sc_xtal_latency = 0;
10286 sc->sc_tx_with_siso_diversity = 0;
10287 sc->sc_uhb_supported = 0;
10288 break;
10289 case PCI_PRODUCT_INTEL_WL_22500_2:
10290 case PCI_PRODUCT_INTEL_WL_22500_5:
10291 /* These devices should be QuZ only. */
10292 if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
10293 device_printf(dev, "unsupported AX201 adapter\n");
10294 return (ENXIO);
10295 }
10296 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10297 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10298 sc->sc_integrated = 1;
10299 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10300 sc->sc_low_latency_xtal = 0;
10301 sc->sc_xtal_latency = 500;
10302 sc->sc_tx_with_siso_diversity = 0;
10303 sc->sc_uhb_supported = 0;
10304 break;
10305 case PCI_PRODUCT_INTEL_WL_22500_3:
10306 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10307 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10308 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10309 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10310 else
10311 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10312 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10313 sc->sc_integrated = 1;
10314 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10315 sc->sc_low_latency_xtal = 0;
10316 sc->sc_xtal_latency = 500;
10317 sc->sc_tx_with_siso_diversity = 0;
10318 sc->sc_uhb_supported = 0;
10319 break;
10320 case PCI_PRODUCT_INTEL_WL_22500_4:
10321 case PCI_PRODUCT_INTEL_WL_22500_7:
10322 case PCI_PRODUCT_INTEL_WL_22500_8:
10323 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10324 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10325 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10326 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10327 else
10328 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10329 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10330 sc->sc_integrated = 1;
10331 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
10332 sc->sc_low_latency_xtal = 0;
10333 sc->sc_xtal_latency = 1820;
10334 sc->sc_tx_with_siso_diversity = 0;
10335 sc->sc_uhb_supported = 0;
10336 break;
10337 case PCI_PRODUCT_INTEL_WL_22500_6:
10338 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10339 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10340 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10341 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10342 else
10343 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10344 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10345 sc->sc_integrated = 1;
10346 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10347 sc->sc_low_latency_xtal = 1;
10348 sc->sc_xtal_latency = 12000;
10349 sc->sc_tx_with_siso_diversity = 0;
10350 sc->sc_uhb_supported = 0;
10351 break;
10352 case PCI_PRODUCT_INTEL_WL_22500_9:
10353 case PCI_PRODUCT_INTEL_WL_22500_10:
10354 case PCI_PRODUCT_INTEL_WL_22500_11:
10355 case PCI_PRODUCT_INTEL_WL_22500_13:
10356 /* _14 is an MA device, not yet supported */
10357 case PCI_PRODUCT_INTEL_WL_22500_15:
10358 case PCI_PRODUCT_INTEL_WL_22500_16:
10359 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10360 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10361 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10362 sc->sc_integrated = 0;
10363 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10364 sc->sc_low_latency_xtal = 0;
10365 sc->sc_xtal_latency = 0;
10366 sc->sc_tx_with_siso_diversity = 0;
10367 sc->sc_uhb_supported = 1;
10368 break;
10369 case PCI_PRODUCT_INTEL_WL_22500_12:
10370 case PCI_PRODUCT_INTEL_WL_22500_17:
10371 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10372 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10373 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10374 sc->sc_integrated = 1;
10375 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10376 sc->sc_low_latency_xtal = 1;
10377 sc->sc_xtal_latency = 12000;
10378 sc->sc_tx_with_siso_diversity = 0;
10379 sc->sc_uhb_supported = 0;
10380 sc->sc_imr_enabled = 1;
10381 break;
10382 default:
10383 device_printf(dev, "unknown adapter type\n");
10384 return (ENXIO);
10385 }
10386
10387 cfg = iwx_find_device_cfg(sc);
10388 DPRINTF(("%s: cfg=%p\n", __func__, cfg));
10389 if (cfg) {
10390 sc->sc_fwname = cfg->fw_name;
10391 sc->sc_pnvm_name = cfg->pnvm_name;
10392 sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
10393 sc->sc_uhb_supported = cfg->uhb_supported;
10394 if (cfg->xtal_latency) {
10395 sc->sc_xtal_latency = cfg->xtal_latency;
10396 sc->sc_low_latency_xtal = cfg->low_latency_xtal;
10397 }
10398 }
10399
10400 sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
10401
10402 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10403 sc->sc_umac_prph_offset = 0x300000;
10404 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
10405 } else
10406 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
10407
10408 /* Allocate DMA memory for loading firmware. */
10409 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10410 ctxt_info_size = sizeof(struct iwx_context_info_gen3);
10411 else
10412 ctxt_info_size = sizeof(struct iwx_context_info);
10413 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
10414 ctxt_info_size, 1);
10415 if (err) {
10416 device_printf(dev,
10417 "could not allocate memory for loading firmware\n");
10418 return (ENXIO);
10419 }
10420
10421 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10422 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
10423 sizeof(struct iwx_prph_scratch), 1);
10424 if (err) {
10425 device_printf(dev,
10426 "could not allocate prph scratch memory\n");
10427 goto fail1;
10428 }
10429
10430 /*
10431 * Allocate prph information. The driver doesn't use this.
10432 * We use the second half of this page to give the device
10433 * some dummy TR/CR tail pointers - which shouldn't be
10434 * necessary as we don't use this, but the hardware still
10435 * reads/writes there and we can't let it go do that with
10436 * a NULL pointer.
10437 */
10438 KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
10439 ("iwx_prph_info has wrong size"));
10440 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
10441 PAGE_SIZE, 1);
10442 if (err) {
10443 device_printf(dev,
10444 "could not allocate prph info memory\n");
10445 goto fail1;
10446 }
10447 }
10448
10449 /* Allocate interrupt cause table (ICT).*/
10450 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10451 IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
10452 if (err) {
10453 device_printf(dev, "could not allocate ICT table\n");
10454 goto fail1;
10455 }
10456
10457 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10458 err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10459 if (err) {
10460 device_printf(dev, "could not allocate TX ring %d\n",
10461 txq_i);
10462 goto fail4;
10463 }
10464 }
10465
10466 err = iwx_alloc_rx_ring(sc, &sc->rxq);
10467 if (err) {
10468 device_printf(sc->sc_dev, "could not allocate RX ring\n");
10469 goto fail4;
10470 }
10471
10472 #ifdef IWX_DEBUG
10473 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10474 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
10475 CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
10476
10477 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10478 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
10479 CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
10480 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10481 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
10482 CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
10483
10484 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10485 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
10486 CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
10487
10488 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10489 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
10490 CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
10491 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10492 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
10493 CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
10494 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10495 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
10496 CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
10497 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10498 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
10499 CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
10500 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10501 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
10502 CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
10503 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10504 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
10505 CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
10506 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10507 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
10508 CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
10509 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10510 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
10511 CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
10512 #endif
10513 ic->ic_softc = sc;
10514 ic->ic_name = device_get_nameunit(sc->sc_dev);
10515 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
10516 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
10517
10518 /* Set device capabilities. */
10519 ic->ic_caps =
10520 IEEE80211_C_STA |
10521 IEEE80211_C_MONITOR |
10522 IEEE80211_C_WPA | /* WPA/RSN */
10523 IEEE80211_C_WME |
10524 IEEE80211_C_PMGT |
10525 IEEE80211_C_SHSLOT | /* short slot time supported */
10526 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
10527 IEEE80211_C_BGSCAN /* capable of bg scanning */
10528 ;
10529 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
10530 /* Enable seqno offload */
10531 ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
10532 /* Don't send null data frames; let firmware do it */
10533 ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
10534
10535 ic->ic_txstream = 2;
10536 ic->ic_rxstream = 2;
10537 ic->ic_htcaps |= IEEE80211_HTC_HT
10538 | IEEE80211_HTCAP_SMPS_OFF
10539 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
10540 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
10541 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
10542 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
10543 // | IEEE80211_HTC_RX_AMSDU_AMPDU /* TODO: hw reorder */
10544 | IEEE80211_HTCAP_MAXAMSDU_3839; /* max A-MSDU length */
10545
10546 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
10547
10548 /*
10549 * XXX: setupcurchan() expects vhtcaps to be non-zero
10550 * https://bugs.freebsd.org/274156
10551 */
10552 ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
10553 | IEEE80211_VHTCAP_SHORT_GI_80
10554 | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
10555 | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
10556 | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
10557
10558 ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
10559 int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
10560 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
10561 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
10562 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
10563 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
10564 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
10565 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
10566 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
10567 ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
10568 ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
10569
10570 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
10571 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10572 struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10573 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10574 rxba->sc = sc;
10575 for (j = 0; j < nitems(rxba->entries); j++)
10576 mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
10577 }
10578
10579 sc->sc_preinit_hook.ich_func = iwx_attach_hook;
10580 sc->sc_preinit_hook.ich_arg = sc;
10581 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
10582 device_printf(dev,
10583 "config_intrhook_establish failed\n");
10584 goto fail4;
10585 }
10586
10587 return (0);
10588
10589 fail4:
10590 while (--txq_i >= 0)
10591 iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10592 iwx_free_rx_ring(sc, &sc->rxq);
10593 if (sc->ict_dma.vaddr != NULL)
10594 iwx_dma_contig_free(&sc->ict_dma);
10595
10596 fail1:
10597 iwx_dma_contig_free(&sc->ctxt_info_dma);
10598 iwx_dma_contig_free(&sc->prph_scratch_dma);
10599 iwx_dma_contig_free(&sc->prph_info_dma);
10600 return (ENXIO);
10601 }
10602
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	/*
	 * Detach in roughly the reverse order of attach: quiesce the
	 * hardware and deferred work first, then unhook from net80211,
	 * then release rings, firmware references and bus resources.
	 */
	iwx_stop_device(sc);

	/* Make sure no queued tasks run while state is being torn down. */
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	ieee80211_ifdetach(&sc->sc_ic);

	/* watchdog_to uses sc_mtx; drain it before the lock is destroyed. */
	callout_drain(&sc->watchdog_to);

	/* Free all TX rings and the single RX ring. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Drop the firmware image reference taken at attach time. */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	/* Likewise for the PNVM (platform NVM) image, if one was loaded. */
	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	/* Tear down the interrupt handler and release MSI/IRQ resources. */
	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10646
10647 static void
iwx_radiotap_attach(struct iwx_softc * sc)10648 iwx_radiotap_attach(struct iwx_softc *sc)
10649 {
10650 struct ieee80211com *ic = &sc->sc_ic;
10651
10652 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10653 "->%s begin\n", __func__);
10654
10655 ieee80211_radiotap_attach(ic,
10656 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10657 IWX_TX_RADIOTAP_PRESENT,
10658 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10659 IWX_RX_RADIOTAP_PRESENT);
10660
10661 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10662 "->%s end\n", __func__);
10663 }
10664
10665 struct ieee80211vap *
iwx_vap_create(struct ieee80211com * ic,const char name[IFNAMSIZ],int unit,enum ieee80211_opmode opmode,int flags,const uint8_t bssid[IEEE80211_ADDR_LEN],const uint8_t mac[IEEE80211_ADDR_LEN])10666 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10667 enum ieee80211_opmode opmode, int flags,
10668 const uint8_t bssid[IEEE80211_ADDR_LEN],
10669 const uint8_t mac[IEEE80211_ADDR_LEN])
10670 {
10671 struct iwx_vap *ivp;
10672 struct ieee80211vap *vap;
10673
10674 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
10675 return NULL;
10676 ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10677 vap = &ivp->iv_vap;
10678 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10679 vap->iv_bmissthreshold = 10; /* override default */
10680 /* Override with driver methods. */
10681 ivp->iv_newstate = vap->iv_newstate;
10682 vap->iv_newstate = iwx_newstate;
10683
10684 ivp->id = IWX_DEFAULT_MACID;
10685 ivp->color = IWX_DEFAULT_COLOR;
10686
10687 ivp->have_wme = TRUE;
10688 ivp->ps_disabled = FALSE;
10689
10690 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10691 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10692
10693 /* h/w crypto support */
10694 vap->iv_key_alloc = iwx_key_alloc;
10695 vap->iv_key_delete = iwx_key_delete;
10696 vap->iv_key_set = iwx_key_set;
10697 vap->iv_key_update_begin = iwx_key_update_begin;
10698 vap->iv_key_update_end = iwx_key_update_end;
10699
10700 ieee80211_ratectl_init(vap);
10701 /* Complete setup. */
10702 ieee80211_vap_attach(vap, ieee80211_media_change,
10703 ieee80211_media_status, mac);
10704 ic->ic_opmode = opmode;
10705
10706 return vap;
10707 }
10708
10709 static void
iwx_vap_delete(struct ieee80211vap * vap)10710 iwx_vap_delete(struct ieee80211vap *vap)
10711 {
10712 struct iwx_vap *ivp = IWX_VAP(vap);
10713
10714 ieee80211_ratectl_deinit(vap);
10715 ieee80211_vap_detach(vap);
10716 free(ivp, M_80211_VAP);
10717 }
10718
10719 static void
iwx_parent(struct ieee80211com * ic)10720 iwx_parent(struct ieee80211com *ic)
10721 {
10722 struct iwx_softc *sc = ic->ic_softc;
10723 IWX_LOCK(sc);
10724
10725 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10726 iwx_stop(sc);
10727 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10728 } else {
10729 iwx_init(sc);
10730 ieee80211_start_all(ic);
10731 }
10732 IWX_UNLOCK(sc);
10733 }
10734
10735 static int
iwx_suspend(device_t dev)10736 iwx_suspend(device_t dev)
10737 {
10738 struct iwx_softc *sc = device_get_softc(dev);
10739 struct ieee80211com *ic = &sc->sc_ic;
10740
10741 /*
10742 * Suspend everything first, then shutdown hardware if it's
10743 * still up.
10744 */
10745 ieee80211_suspend_all(ic);
10746
10747 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10748 iwx_stop(sc);
10749 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10750 }
10751 return (0);
10752 }
10753
10754 static int
iwx_resume(device_t dev)10755 iwx_resume(device_t dev)
10756 {
10757 struct iwx_softc *sc = device_get_softc(dev);
10758 struct ieee80211com *ic = &sc->sc_ic;
10759
10760 /*
10761 * We disable the RETRY_TIMEOUT register (0x41) to keep
10762 * PCI Tx retries from interfering with C3 CPU state.
10763 */
10764 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10765
10766 IWX_LOCK(sc);
10767
10768 /* Stop the hardware here if it's still thought of as "up" */
10769 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10770 iwx_stop(sc);
10771 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10772 }
10773
10774 IWX_UNLOCK(sc);
10775
10776 /* Start the VAPs, which will bring the hardware back up again */
10777 ieee80211_resume_all(ic);
10778 return (0);
10779 }
10780
10781 static void
iwx_scan_start(struct ieee80211com * ic)10782 iwx_scan_start(struct ieee80211com *ic)
10783 {
10784 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10785 struct iwx_softc *sc = ic->ic_softc;
10786 int err;
10787
10788 IWX_LOCK(sc);
10789 if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10790 err = iwx_scan(sc);
10791 else
10792 err = iwx_bgscan(ic);
10793 IWX_UNLOCK(sc);
10794 if (err)
10795 ieee80211_cancel_scan(vap);
10796
10797 return;
10798 }
10799
static void
iwx_update_mcast(struct ieee80211com *ic)
{
	/* No-op: multicast filter reprogramming is not implemented. */
}
10804
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	/* No-op: per-channel dwell is handled by the firmware scan offload. */
}
10809
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* No-op: minimum dwell time is managed by the firmware scan offload. */
}
10814
static void
iwx_scan_end(struct ieee80211com *ic)
{
	/* Tear down driver/firmware scan state when net80211 ends a scan. */
	iwx_endscan(ic->ic_softc);
}
10820
static void
iwx_set_channel(struct ieee80211com *ic)
{
	/*
	 * Intentionally a no-op for now; presumably channel changes are
	 * driven through PHY context updates elsewhere (see the disabled
	 * iwx_phy_ctxt_task() call below) — TODO confirm before enabling.
	 */
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}
10832
10833 static void
iwx_endscan_cb(void * arg,int pending)10834 iwx_endscan_cb(void *arg, int pending)
10835 {
10836 struct iwx_softc *sc = arg;
10837 struct ieee80211com *ic = &sc->sc_ic;
10838
10839 DPRINTF(("scan ended\n"));
10840 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10841 }
10842
static int
iwx_wme_update(struct ieee80211com *ic)
{
	/* No-op: WME parameters are not reprogrammed here; report success. */
	return 0;
}
10848
10849 static int
iwx_raw_xmit(struct ieee80211_node * ni,struct mbuf * m,const struct ieee80211_bpf_params * params)10850 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10851 const struct ieee80211_bpf_params *params)
10852 {
10853 struct ieee80211com *ic = ni->ni_ic;
10854 struct iwx_softc *sc = ic->ic_softc;
10855 int err;
10856
10857 IWX_LOCK(sc);
10858 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10859 err = iwx_tx(sc, m, ni);
10860 IWX_UNLOCK(sc);
10861 return err;
10862 } else {
10863 IWX_UNLOCK(sc);
10864 return EIO;
10865 }
10866 }
10867
10868 static int
iwx_transmit(struct ieee80211com * ic,struct mbuf * m)10869 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10870 {
10871 struct iwx_softc *sc = ic->ic_softc;
10872 int error;
10873
10874 // TODO: mbufq_enqueue in iwm
10875 // TODO dequeue in iwm_start, counters, locking
10876 IWX_LOCK(sc);
10877 error = mbufq_enqueue(&sc->sc_snd, m);
10878 if (error) {
10879 IWX_UNLOCK(sc);
10880 return (error);
10881 }
10882
10883 iwx_start(sc);
10884 IWX_UNLOCK(sc);
10885 return (0);
10886 }
10887
10888 static int
iwx_ampdu_rx_start(struct ieee80211_node * ni,struct ieee80211_rx_ampdu * rap,int baparamset,int batimeout,int baseqctl)10889 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10890 int baparamset, int batimeout, int baseqctl)
10891 {
10892 struct ieee80211com *ic = ni->ni_ic;
10893 struct iwx_softc *sc = ic->ic_softc;
10894 int tid;
10895
10896 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10897 sc->ni_rx_ba[tid].ba_winstart =
10898 _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10899 sc->ni_rx_ba[tid].ba_winsize =
10900 _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10901 sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10902
10903 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10904 tid >= IWX_MAX_TID_COUNT)
10905 return ENOSPC;
10906
10907 if (sc->ba_rx.start_tidmask & (1 << tid)) {
10908 DPRINTF(("%s: tid %d already added\n", __func__, tid));
10909 return EBUSY;
10910 }
10911 DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10912
10913 sc->ba_rx.start_tidmask |= (1 << tid);
10914 DPRINTF(("%s: tid=%i\n", __func__, tid));
10915 DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10916 DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10917 DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10918
10919 taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10920
10921 // TODO:misha move to ba_task (serialize)
10922 sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10923
10924 return (0);
10925 }
10926
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	/* No-op: RX BA session teardown is not implemented here. */
	return;
}
10932
10933 /**
10934 * @brief Called by net80211 to request an A-MPDU session be established.
10935 *
10936 * This is called by net80211 to see if an A-MPDU session can be established.
10937 * However, the iwx(4) firmware will take care of establishing the BA
10938 * session for us. net80211 doesn't have to send any action frames here;
10939 * it just needs to plumb up the ampdu session once the BA has been sent.
10940 *
10941 * If we return 0 here then the firmware will set up the state but net80211
10942 * will not; so it's on us to actually complete it via a call to
 * ieee80211_ampdu_tx_request_active_ext().
10944 *
10945 * @param ni ieee80211_node to establish A-MPDU session for
10946 * @param tap pointer to the per-TID state struct
10947 * @param dialogtoken dialogtoken field from the BA request
10948 * @param baparamset baparamset field from the BA request
10949 * @param batimeout batimeout field from the BA request
10950 *
10951 * @returns 0 so net80211 doesn't send the BA action frame to establish A-MPDU.
10952 */
10953 static int
iwx_addba_request(struct ieee80211_node * ni,struct ieee80211_tx_ampdu * tap,int dialogtoken,int baparamset,int batimeout)10954 iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
10955 int dialogtoken, int baparamset, int batimeout)
10956 {
10957 struct iwx_softc *sc = ni->ni_ic->ic_softc;
10958 int tid;
10959
10960 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10961 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
10962 "%s: queuing AMPDU start on tid %i\n", __func__, tid);
10963
10964 /* There's no nice way right now to tell net80211 that we're in the
10965 * middle of an asynchronous ADDBA setup session. So, bump the timeout
10966 * to hz ticks, hopefully we'll get a response by then.
10967 */
10968 tap->txa_nextrequest = ticks + hz;
10969
10970 IWX_LOCK(sc);
10971 sc->ba_tx.start_tidmask |= (1 << tid);
10972 IWX_UNLOCK(sc);
10973
10974 taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
10975
10976 return (0);
10977 }
10978
10979
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	/* No-op: ADDBA responses are handled by firmware; report success. */
	return 0;
}
10986
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	/* No-op: no serialization is needed around key updates here. */
	return;
}
10992
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	/* No-op: counterpart of iwx_key_update_begin(). */
	return;
}
10998
/*
 * net80211 key-slot allocation hook.  Returns 1 on success, 0 on failure
 * (net80211 convention for iv_key_alloc).
 */
static int
iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{

	/*
	 * CCMP keys are programmed into the firmware via iwx_key_set();
	 * accept them without assigning a software index.
	 * NOTE(review): *keyix/*rxkeyix are left untouched on this path —
	 * presumably net80211's defaults apply; confirm against
	 * ieee80211_crypto_newkey().
	 */
	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
		return (1);
	}

	if (ieee80211_is_key_unicast(vap, k)) {
		*keyix = 0;	/* NB: use key index 0 for ucast key */
	} else if (ieee80211_is_key_global(vap, k)) {
		/* Global (WEP-style) keys use their configured slot. */
		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
	} else {
		net80211_vap_printf(vap, "%s: invalid crypto key type\n",
		    __func__);
		return (0);
	}
	*rxkeyix = IEEE80211_KEYIX_NONE;	/* XXX maybe *keyix? */
	return (1);
}
11020
11021 static int
iwx_key_set(struct ieee80211vap * vap,const struct ieee80211_key * k)11022 iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
11023 {
11024 struct ieee80211com *ic = vap->iv_ic;
11025 struct iwx_softc *sc = ic->ic_softc;
11026 struct iwx_add_sta_key_cmd cmd;
11027 uint32_t status;
11028 int err;
11029 int id;
11030
11031 if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
11032 return 1;
11033 }
11034
11035 /*
11036 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
11037 * Currently we only implement station mode where 'ni' is always
11038 * ic->ic_bss so there is no need to validate arguments beyond this:
11039 */
11040
11041 memset(&cmd, 0, sizeof(cmd));
11042
11043 if (ieee80211_is_key_global(vap, k)) {
11044 id = ieee80211_crypto_get_key_wepidx(vap, k);
11045 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding group key\n",
11046 __func__);
11047 } else if (ieee80211_is_key_unicast(vap, k)) {
11048 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding key\n",
11049 __func__);
11050 id = 0; /* net80211 currently only supports unicast key 0 */
11051 } else {
11052 net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
11053 return (ENXIO);
11054 }
11055
11056 IWX_LOCK(sc);
11057
11058 cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
11059 IWX_STA_KEY_FLG_WEP_KEY_MAP |
11060 ((id << IWX_STA_KEY_FLG_KEYID_POS) &
11061 IWX_STA_KEY_FLG_KEYID_MSK));
11062 if (ieee80211_is_key_global(vap, k)) {
11063 cmd.common.key_offset = 1;
11064 cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
11065 } else if (ieee80211_is_key_unicast(vap, k)) {
11066 cmd.common.key_offset = 0;
11067 } else {
11068 net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
11069 IWX_UNLOCK(sc);
11070 return (ENXIO);
11071 }
11072 memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
11073 k->wk_keylen));
11074 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: key: id=%d, len=%i, key=%*D\n",
11075 __func__, id, k->wk_keylen, k->wk_keylen,
11076 (const unsigned char *) k->wk_key, "");
11077 cmd.common.sta_id = IWX_STATION_ID;
11078
11079 cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
11080 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: k->wk_keytsc=%lu\n", __func__,
11081 k->wk_keytsc);
11082
11083 status = IWX_ADD_STA_SUCCESS;
11084 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
11085 &status);
11086 if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
11087 err = EIO;
11088 if (err) {
11089 net80211_vap_printf(vap,
11090 "%s: can't set wpa2 keys (error %d)\n", __func__, err);
11091 IWX_UNLOCK(sc);
11092 return err;
11093 } else
11094 IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT,
11095 "%s: key added successfully\n", __func__);
11096 IWX_UNLOCK(sc);
11097 return (1);
11098 }
11099
/* net80211 key-delete hook; returns 1 (handled) unconditionally. */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/*
	 * Note: since there's no key allocations to track - it's either
	 * the 4 static WEP keys or the single unicast key - there's nothing
	 * else to do here.
	 *
	 * This would need some further work to support IBSS/mesh/AP modes.
	 */
	return (1);
}
11112
/* newbus method table mapping device entry points onto driver routines. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, iwx_probe),
	DEVMETHOD(device_attach, iwx_attach),
	DEVMETHOD(device_detach, iwx_detach),
	DEVMETHOD(device_suspend, iwx_suspend),
	DEVMETHOD(device_resume, iwx_resume),

	DEVMETHOD_END
};

static driver_t iwx_pci_driver = {
	"iwx",			/* driver name */
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};

/* Register on the PCI bus, export PNP match data, declare dependencies. */
DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11136