xref: /freebsd/sys/contrib/dev/iwlwifi/mvm/ops.c (revision cc1a53bc1aea0675d64e9547cdca241612906592)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
6  */
7 #if defined(__FreeBSD__)
8 #define	LINUXKPI_PARAM_PREFIX	iwlwifi_mvm_
9 #endif
10 #include <linux/module.h>
11 #if defined(__linux__)
12 #include <linux/rtnetlink.h>
13 #endif
14 #include <linux/vmalloc.h>
15 #include <net/mac80211.h>
16 
17 #include "fw/notif-wait.h"
18 #include "iwl-trans.h"
19 #include "iwl-op-mode.h"
20 #include "fw/img.h"
21 #include "iwl-debug.h"
22 #include "iwl-drv.h"
23 #include "iwl-modparams.h"
24 #include "mvm.h"
25 #include "iwl-phy-db.h"
26 #include "iwl-eeprom-parse.h"
27 #include "iwl-csr.h"
28 #include "iwl-io.h"
29 #include "iwl-prph.h"
30 #include "rs.h"
31 #include "fw/api/scan.h"
32 #include "fw/api/rfi.h"
33 #include "time-event.h"
34 #include "fw-api.h"
35 #include "fw/acpi.h"
36 #include "fw/uefi.h"
37 
38 #if defined(__linux__)
39 #define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
40 MODULE_LICENSE("GPL");
41 #elif defined(__FreeBSD__)
42 #define DRV_DESCRIPTION	"The new Intel(R) wireless AGN/AC/AX based driver for FreeBSD"
43 MODULE_LICENSE("BSD");
44 #endif
45 MODULE_DESCRIPTION(DRV_DESCRIPTION);
46 MODULE_IMPORT_NS(IWLWIFI);
47 
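/*
 * Op-mode ops tables: iwl_mvm_ops is used for devices with the legacy
 * (single queue) RX API, iwl_mvm_ops_mq for multi-queue RX devices;
 * the choice is made in iwl_op_mode_mvm_start() below.
 */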
48 static const struct iwl_op_mode_ops iwl_mvm_ops;
49 static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
50 
51 struct iwl_mvm_mod_params iwlmvm_mod_params = {
52 #if defined(__FreeBSD__)
53 	.power_scheme = IWL_POWER_SCHEME_CAM,	/* disable default PS */
54 #else
55 	.power_scheme = IWL_POWER_SCHEME_BPS,
56 #endif
57 	/* rest of fields are 0 by default */
58 };
59 
60 module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
61 MODULE_PARM_DESC(init_dbg,
62 		 "set to true to debug an ASSERT in INIT fw (default: false)");
63 module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
64 MODULE_PARM_DESC(power_scheme,
65 		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
66 
67 /*
68  * module init and exit functions
69  */
70 static int __init iwl_mvm_init(void)
71 {
72 	int ret;
73 
74 	ret = iwl_mvm_rate_control_register();
75 	if (ret) {
76 		pr_err("Unable to register rate control algorithm: %d\n", ret);
77 		return ret;
78 	}
79 
80 	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
81 	if (ret)
82 		pr_err("Unable to register MVM op_mode: %d\n", ret);
83 
84 	return ret;
85 }
86 #if defined(__linux__)
87 module_init(iwl_mvm_init);
88 #elif defined(__FreeBSD__)
89 module_init_order(iwl_mvm_init, SI_ORDER_SECOND);
90 #endif
91 
92 static void __exit iwl_mvm_exit(void)
93 {
94 	iwl_opmode_deregister("iwlmvm");
95 	iwl_mvm_rate_control_unregister();
96 }
97 module_exit(iwl_mvm_exit);
98 
99 static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
100 {
101 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
102 	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
103 	u32 reg_val;
104 	u32 phy_config = iwl_mvm_get_phy_config(mvm);
105 
106 	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
107 			 FW_PHY_CFG_RADIO_TYPE_POS;
108 	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
109 			 FW_PHY_CFG_RADIO_STEP_POS;
110 	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
111 			 FW_PHY_CFG_RADIO_DASH_POS;
112 
113 	/* SKU control */
114 	reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
115 
116 	/* radio configuration */
117 	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
118 	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
119 	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
120 
121 	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
122 		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
123 
124 	/*
125 	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
126 	 * sampling, and shouldn't be set to any non-zero value.
127 	 * The same is supposed to be true of other HW, but clearing these
128 	 * bits on some devices (such as the 7260) causes automatic tests to
129 	 * fail on seemingly unrelated errors. This needs further
130 	 * investigation, but for now we'll handle the cases separately.
131 	 */
132 	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
133 		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
134 
135 	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
136 		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
137 
138 	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
139 				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
140 				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
141 				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
142 				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
143 				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
144 				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI   |
145 				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
146 				reg_val);
147 
148 	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
149 		       radio_cfg_step, radio_cfg_dash);
150 
151 	/*
152 	 * W/A: the NIC gets stuck in a reset state after early PCIe power off
153 	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
154 	 * to lose ownership and be unable to obtain it back.
155 	 */
156 	if (!mvm->trans->cfg->apmg_not_supported)
157 		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
158 				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
159 				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
160 }
161 
162 static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
163 				     struct iwl_rx_cmd_buffer *rxb)
164 {
165 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
166 	struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
167 	struct ieee80211_supported_band *sband;
168 	const struct ieee80211_sta_he_cap *he_cap;
169 	struct ieee80211_vif *vif;
170 
171 	if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
172 		return;
173 
174 	vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
175 	if (!vif || vif->type != NL80211_IFTYPE_STATION)
176 		return;
177 
178 	if (!vif->bss_conf.chandef.chan ||
179 	    vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
180 	    vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
181 		return;
182 
183 	if (!vif->bss_conf.assoc)
184 		return;
185 
186 	/* this shouldn't happen *again*, ignore it */
187 	if (mvm->cca_40mhz_workaround)
188 		return;
189 
190 	/*
191 	 * We'll decrement this on disconnect - so set to 2 since we'll
192 	 * still have to disconnect from the current AP first.
193 	 */
194 	mvm->cca_40mhz_workaround = 2;
195 
196 	/*
197 	 * This capability manipulation isn't really ideal, but it's the
198 	 * easiest choice - otherwise we'd have to do some major changes
199 	 * in mac80211 to support this, which isn't worth it. This does
200 	 * mean that userspace may have outdated information, but that's
201 	 * actually not an issue at all.
202 	 */
203 	sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
204 
205 	WARN_ON(!sband->ht_cap.ht_supported);
206 	WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
207 	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
208 
209 	he_cap = ieee80211_get_he_iftype_cap(sband,
210 					     ieee80211_vif_type_p2p(vif));
211 
212 	if (he_cap) {
213 		/* we know that ours is writable */
214 		struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
215 
216 		WARN_ON(!he->has_he);
217 		WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
218 				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
219 		he->he_cap_elem.phy_cap_info[0] &=
220 			~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
221 	}
222 
223 	ieee80211_disconnect(vif, true);
224 }
225 
226 void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
227 {
228 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
229 	struct iwl_mvm *mvm = mvmvif->mvm;
230 	enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;
231 
232 	if (mvm->fw_static_smps_request &&
233 	    vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 &&
234 	    vif->bss_conf.he_support)
235 		mode = IEEE80211_SMPS_STATIC;
236 
237 	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode);
238 }
239 
240 static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
241 					struct ieee80211_vif *vif)
242 {
243 	iwl_mvm_apply_fw_smps_request(vif);
244 }
245 
246 static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
247 					      struct iwl_rx_cmd_buffer *rxb)
248 {
249 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
250 	struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
251 
252 	/*
253 	 * We could pass it in the iterator data, but we also need to remember
254 	 * it for new interfaces that are added while in this state.
255 	 */
256 	mvm->fw_static_smps_request =
257 		req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
258 	ieee80211_iterate_interfaces(mvm->hw,
259 				     IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
260 				     iwl_mvm_intf_dual_chain_req, NULL);
261 }
262 
263 /**
264  * enum iwl_rx_handler_context - context for Rx handler
265  * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
266  *	which can't acquire mvm->mutex.
267  * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
268  *	(and only in this case!), it should be set as ASYNC. In that case,
269  *	it will be called from a worker with mvm->mutex held.
270  * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
271  *	mutex itself, it will be called from a worker without mvm->mutex held.
272  */
273 enum iwl_rx_handler_context {
274 	RX_HANDLER_SYNC,
275 	RX_HANDLER_ASYNC_LOCKED,
276 	RX_HANDLER_ASYNC_UNLOCKED,
277 };
278 
279 /**
280  * struct iwl_rx_handlers - handler for FW notification
281  * @cmd_id: command id
282  * @min_size: minimum size to expect for the notification
283  * @context: see &iwl_rx_handler_context
284  * @fn: the function called when the notification is received
285  */
286 struct iwl_rx_handlers {
287 	u16 cmd_id, min_size;
288 	enum iwl_rx_handler_context context;
289 	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
290 };
291 
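/*
 * The RX_HANDLER*() macros below build the entries of the handler table:
 * e.g. RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
 * struct iwl_mvm_tx_resp) yields an entry with .cmd_id = TX_CMD,
 * .fn = iwl_mvm_rx_tx_cmd, .context = RX_HANDLER_SYNC and
 * .min_size = sizeof(struct iwl_mvm_tx_resp).  The _NO_SIZE variants
 * skip the minimum payload size check, and the _GRP variants take a
 * (group, command) pair combined via WIDE_ID().
 */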
292 #define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context)		\
293 	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
294 #define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context)	\
295 	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
296 #define RX_HANDLER(_cmd_id, _fn, _context, _struct)		\
297 	{ .cmd_id = _cmd_id, .fn = _fn,				\
298 	  .context = _context, .min_size = sizeof(_struct), }
299 #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct)	\
300 	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn,		\
301 	  .context = _context, .min_size = sizeof(_struct), }
302 
303 /*
304  * Handlers for fw notifications
305  * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
306  * This list should be in order of frequency for performance purposes.
307  *
308  * The handler can run in one of three contexts, see &iwl_rx_handler_context
309  */
310 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
311 	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
312 		   struct iwl_mvm_tx_resp),
313 	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
314 		   struct iwl_mvm_ba_notif),
315 
316 	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
317 		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
318 		       struct iwl_tlc_update_notif),
319 
320 	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
321 		   RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
322 	RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
323 			   RX_HANDLER_ASYNC_LOCKED),
324 	RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
325 			   RX_HANDLER_ASYNC_LOCKED),
326 
327 	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
328 		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
329 		   struct iwl_ba_window_status_notif),
330 
331 	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
332 		   RX_HANDLER_SYNC, struct iwl_time_event_notif),
333 	RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
334 		       iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
335 		       struct iwl_mvm_session_prot_notif),
336 	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
337 		   RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
338 
339 	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
340 		   struct iwl_mvm_eosp_notification),
341 
342 	RX_HANDLER(SCAN_ITERATION_COMPLETE,
343 		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
344 		   struct iwl_lmac_scan_complete_notif),
345 	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
346 		   iwl_mvm_rx_lmac_scan_complete_notif,
347 		   RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
348 	RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
349 			   iwl_mvm_rx_scan_match_found,
350 			   RX_HANDLER_SYNC),
351 	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
352 		   RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
353 	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
354 		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
355 		   struct iwl_umac_scan_iter_complete_notif),
356 
357 	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
358 		   RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
359 
360 	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
361 		   struct iwl_error_resp),
362 	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
363 		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
364 		   struct iwl_uapsd_misbehaving_ap_notif),
365 	RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
366 			   RX_HANDLER_ASYNC_LOCKED),
367 	RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
368 			       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
369 	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
370 		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
371 		       struct ct_kill_notif),
372 
373 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
374 		   RX_HANDLER_ASYNC_LOCKED,
375 		   struct iwl_tdls_channel_switch_notif),
376 	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
377 		   RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
378 	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
379 		       iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
380 		       struct iwl_ftm_responder_stats),
381 
382 	RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
383 			       iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
384 	RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
385 			       iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
386 
387 	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
388 		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
389 		       struct iwl_mfu_assert_dump_notif),
390 	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
391 		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
392 		       struct iwl_stored_beacon_notif_v2),
393 	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
394 		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
395 		       struct iwl_mu_group_mgmt_notif),
396 	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
397 		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
398 		       struct iwl_mvm_pm_state_notification),
399 	RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
400 		       iwl_mvm_probe_resp_data_notif,
401 		       RX_HANDLER_ASYNC_LOCKED,
402 		       struct iwl_probe_resp_data_notif),
403 	RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
404 		       iwl_mvm_channel_switch_start_notif,
405 		       RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
406 	RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
407 		       iwl_mvm_channel_switch_error_notif,
408 		       RX_HANDLER_ASYNC_UNLOCKED,
409 		       struct iwl_channel_switch_error_notif),
410 	RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
411 		       iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
412 		       struct iwl_datapath_monitor_notif),
413 
414 	RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
415 		       iwl_mvm_rx_thermal_dual_chain_req,
416 		       RX_HANDLER_ASYNC_LOCKED,
417 		       struct iwl_thermal_dual_chain_request),
418 
419 	RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
420 		       iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
421 		       struct iwl_rfi_deactivate_notif),
422 };
423 #undef RX_HANDLER
424 #undef RX_HANDLER_GRP
425 
426 /* Please keep this array *SORTED* by hex value.
427  * Access is done through binary search
428  */
429 static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
430 	HCMD_NAME(UCODE_ALIVE_NTFY),
431 	HCMD_NAME(REPLY_ERROR),
432 	HCMD_NAME(ECHO_CMD),
433 	HCMD_NAME(INIT_COMPLETE_NOTIF),
434 	HCMD_NAME(PHY_CONTEXT_CMD),
435 	HCMD_NAME(DBG_CFG),
436 	HCMD_NAME(SCAN_CFG_CMD),
437 	HCMD_NAME(SCAN_REQ_UMAC),
438 	HCMD_NAME(SCAN_ABORT_UMAC),
439 	HCMD_NAME(SCAN_COMPLETE_UMAC),
440 	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
441 	HCMD_NAME(ADD_STA_KEY),
442 	HCMD_NAME(ADD_STA),
443 	HCMD_NAME(REMOVE_STA),
444 	HCMD_NAME(FW_GET_ITEM_CMD),
445 	HCMD_NAME(TX_CMD),
446 	HCMD_NAME(SCD_QUEUE_CFG),
447 	HCMD_NAME(TXPATH_FLUSH),
448 	HCMD_NAME(MGMT_MCAST_KEY),
449 	HCMD_NAME(WEP_KEY),
450 	HCMD_NAME(SHARED_MEM_CFG),
451 	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
452 	HCMD_NAME(MAC_CONTEXT_CMD),
453 	HCMD_NAME(TIME_EVENT_CMD),
454 	HCMD_NAME(TIME_EVENT_NOTIFICATION),
455 	HCMD_NAME(BINDING_CONTEXT_CMD),
456 	HCMD_NAME(TIME_QUOTA_CMD),
457 	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
458 	HCMD_NAME(LEDS_CMD),
459 	HCMD_NAME(LQ_CMD),
460 	HCMD_NAME(FW_PAGING_BLOCK_CMD),
461 	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
462 	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
463 	HCMD_NAME(HOT_SPOT_CMD),
464 	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
465 	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
466 	HCMD_NAME(BT_COEX_CI),
467 	HCMD_NAME(PHY_CONFIGURATION_CMD),
468 	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
469 	HCMD_NAME(PHY_DB_CMD),
470 	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
471 	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
472 	HCMD_NAME(POWER_TABLE_CMD),
473 	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
474 	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
475 	HCMD_NAME(NVM_ACCESS_CMD),
476 	HCMD_NAME(BEACON_NOTIFICATION),
477 	HCMD_NAME(BEACON_TEMPLATE_CMD),
478 	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
479 	HCMD_NAME(BT_CONFIG),
480 	HCMD_NAME(STATISTICS_CMD),
481 	HCMD_NAME(STATISTICS_NOTIFICATION),
482 	HCMD_NAME(EOSP_NOTIFICATION),
483 	HCMD_NAME(REDUCE_TX_POWER_CMD),
484 	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
485 	HCMD_NAME(TDLS_CONFIG_CMD),
486 	HCMD_NAME(MAC_PM_POWER_TABLE),
487 	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
488 	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
489 	HCMD_NAME(RSS_CONFIG_CMD),
490 	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
491 	HCMD_NAME(REPLY_RX_PHY_CMD),
492 	HCMD_NAME(REPLY_RX_MPDU_CMD),
493 	HCMD_NAME(BAR_FRAME_RELEASE),
494 	HCMD_NAME(FRAME_RELEASE),
495 	HCMD_NAME(BA_NOTIF),
496 	HCMD_NAME(MCC_UPDATE_CMD),
497 	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
498 	HCMD_NAME(MARKER_CMD),
499 	HCMD_NAME(BT_PROFILE_NOTIFICATION),
500 	HCMD_NAME(BCAST_FILTER_CMD),
501 	HCMD_NAME(MCAST_FILTER_CMD),
502 	HCMD_NAME(REPLY_SF_CFG_CMD),
503 	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
504 	HCMD_NAME(D3_CONFIG_CMD),
505 	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
506 	HCMD_NAME(OFFLOADS_QUERY_CMD),
507 	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
508 	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
509 	HCMD_NAME(WOWLAN_PATTERNS),
510 	HCMD_NAME(WOWLAN_CONFIGURATION),
511 	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
512 	HCMD_NAME(WOWLAN_TKIP_PARAM),
513 	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
514 	HCMD_NAME(WOWLAN_GET_STATUSES),
515 	HCMD_NAME(SCAN_ITERATION_COMPLETE),
516 	HCMD_NAME(D0I3_END_CMD),
517 	HCMD_NAME(LTR_CONFIG),
518 	HCMD_NAME(LDBG_CONFIG_CMD),
519 };
520 
521 /* Please keep this array *SORTED* by hex value.
522  * Access is done through binary search
523  */
524 static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
525 	HCMD_NAME(SHARED_MEM_CFG_CMD),
526 	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
527 	HCMD_NAME(FW_ERROR_RECOVERY_CMD),
528 	HCMD_NAME(RFI_CONFIG_CMD),
529 	HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
530 	HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
531 	HCMD_NAME(RFI_DEACTIVATE_NOTIF),
532 };
533 
534 /* Please keep this array *SORTED* by hex value.
535  * Access is done through binary search
536  */
537 static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
538 	HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
539 	HCMD_NAME(SESSION_PROTECTION_CMD),
540 	HCMD_NAME(SESSION_PROTECTION_NOTIF),
541 	HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
542 };
543 
544 /* Please keep this array *SORTED* by hex value.
545  * Access is done through binary search
546  */
547 static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
548 	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
549 	HCMD_NAME(CTDP_CONFIG_CMD),
550 	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
551 	HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
552 	HCMD_NAME(CT_KILL_NOTIFICATION),
553 	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
554 };
555 
556 /* Please keep this array *SORTED* by hex value.
557  * Access is done through binary search
558  */
559 static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
560 	HCMD_NAME(DQA_ENABLE_CMD),
561 	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
562 	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
563 	HCMD_NAME(STA_HE_CTXT_CMD),
564 	HCMD_NAME(RLC_CONFIG_CMD),
565 	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
566 	HCMD_NAME(TLC_MNG_CONFIG_CMD),
567 	HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
568 	HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
569 	HCMD_NAME(MONITOR_NOTIF),
570 	HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
571 	HCMD_NAME(STA_PM_NOTIF),
572 	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
573 	HCMD_NAME(RX_QUEUES_NOTIFICATION),
574 };
575 
576 /* Please keep this array *SORTED* by hex value.
577  * Access is done through binary search
578  */
579 static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
580 	HCMD_NAME(TOF_RANGE_REQ_CMD),
581 	HCMD_NAME(TOF_CONFIG_CMD),
582 	HCMD_NAME(TOF_RANGE_ABORT_CMD),
583 	HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
584 	HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
585 	HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
586 	HCMD_NAME(TOF_LC_NOTIF),
587 	HCMD_NAME(TOF_RESPONDER_STATS),
588 	HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
589 	HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
590 };
591 
592 /* Please keep this array *SORTED* by hex value.
593  * Access is done through binary search
594  */
595 static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
596 	HCMD_NAME(STORED_BEACON_NTF),
597 };
598 
599 /* Please keep this array *SORTED* by hex value.
600  * Access is done through binary search
601  */
602 static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
603 	HCMD_NAME(NVM_ACCESS_COMPLETE),
604 	HCMD_NAME(NVM_GET_INFO),
605 	HCMD_NAME(TAS_CONFIG),
606 };
607 
608 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
609 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
610 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
611 	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
612 	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
613 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
614 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
615 	[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
616 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
617 	[REGULATORY_AND_NVM_GROUP] =
618 		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
619 };
620 
621 /* this forward declaration avoids having to export the function */
622 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
623 
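/*
 * Return the TX power backoff for this platform: walk the per-device
 * backoff table and return the first entry whose power threshold is
 * covered by the ACPI default power limit, or 0 if there is no table
 * or no matching entry.
 */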
624 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
625 {
626 	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
627 	u64 dflt_pwr_limit;
628 
629 	if (!backoff)
630 		return 0;
631 
632 	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
633 
634 	while (backoff->pwr) {
635 		if (dflt_pwr_limit >= backoff->pwr)
636 			return backoff->backoff;
637 
638 		backoff++;
639 	}
640 
641 	return 0;
642 }
643 
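/*
 * Delayed work to unblock TX for the vif whose stations were disabled
 * around a channel switch; clears csa_tx_blocked_vif under mvm->mutex.
 */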
644 static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
645 {
646 	struct iwl_mvm *mvm =
647 		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
648 	struct ieee80211_vif *tx_blocked_vif;
649 	struct iwl_mvm_vif *mvmvif;
650 
651 	mutex_lock(&mvm->mutex);
652 
653 	tx_blocked_vif =
654 		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
655 					  lockdep_is_held(&mvm->mutex));
656 
657 	if (!tx_blocked_vif)
658 		goto unlock;
659 
660 	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
661 	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
662 	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
663 unlock:
664 	mutex_unlock(&mvm->mutex);
665 }
666 
667 static void iwl_mvm_fwrt_dump_start(void *ctx)
668 {
669 	struct iwl_mvm *mvm = ctx;
670 
671 	mutex_lock(&mvm->mutex);
672 }
673 
674 static void iwl_mvm_fwrt_dump_end(void *ctx)
675 {
676 	struct iwl_mvm *mvm = ctx;
677 
678 	mutex_unlock(&mvm->mutex);
679 }
680 
681 static bool iwl_mvm_fwrt_fw_running(void *ctx)
682 {
683 	return iwl_mvm_firmware_running(ctx);
684 }
685 
686 static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
687 {
688 	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
689 	int ret;
690 
691 	mutex_lock(&mvm->mutex);
692 	ret = iwl_mvm_send_cmd(mvm, host_cmd);
693 	mutex_unlock(&mvm->mutex);
694 
695 	return ret;
696 }
697 
698 static bool iwl_mvm_d3_debug_enable(void *ctx)
699 {
700 	return IWL_MVM_D3_DEBUG;
701 }
702 
703 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
704 	.dump_start = iwl_mvm_fwrt_dump_start,
705 	.dump_end = iwl_mvm_fwrt_dump_end,
706 	.fw_running = iwl_mvm_fwrt_fw_running,
707 	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
708 	.d3_debug_enable = iwl_mvm_d3_debug_enable,
709 };
710 
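/*
 * Obtain the NVM data: if CSME owns the device, try to get it over MEI
 * first; otherwise (or if that fails) start the HW and run the INIT
 * firmware to read it from the device.
 */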
711 static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
712 {
713 	struct iwl_trans *trans = mvm->trans;
714 	int ret;
715 
716 	if (trans->csme_own) {
717 		if (WARN(!mvm->mei_registered,
718 			 "csme is owner, but we aren't registered to iwlmei\n"))
719 			goto get_nvm_from_fw;
720 
721 		mvm->mei_nvm_data = iwl_mei_get_nvm();
722 		if (mvm->mei_nvm_data) {
723 			/*
724 			 * mvm->mei_nvm_data is set and because of that,
725 			 * we'll load the NVM from the FW when we get
726 			 * ownership.
727 			 */
728 			mvm->nvm_data =
729 				iwl_parse_mei_nvm_data(trans, trans->cfg,
730 						       mvm->mei_nvm_data, mvm->fw);
731 			return 0;
732 		}
733 
734 		IWL_ERR(mvm,
735 			"Got a NULL NVM from CSME, trying to get it from the device\n");
736 	}
737 
738 get_nvm_from_fw:
739 	rtnl_lock();
740 	wiphy_lock(mvm->hw->wiphy);
741 	mutex_lock(&mvm->mutex);
742 
743 	ret = iwl_trans_start_hw(mvm->trans);
744 	if (ret) {
745 		mutex_unlock(&mvm->mutex);
746 		wiphy_unlock(mvm->hw->wiphy);
747 		rtnl_unlock();
748 		return ret;
749 	}
750 
751 	ret = iwl_run_init_mvm_ucode(mvm);
752 	if (ret && ret != -ERFKILL)
753 		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
754 	if (!ret && iwl_mvm_is_lar_supported(mvm)) {
755 		mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
756 		ret = iwl_mvm_init_mcc(mvm);
757 	}
758 
759 	if (!iwlmvm_mod_params.init_dbg || !ret)
760 		iwl_mvm_stop_device(mvm);
761 
762 	mutex_unlock(&mvm->mutex);
763 	wiphy_unlock(mvm->hw->wiphy);
764 	rtnl_unlock();
765 
766 	if (ret)
767 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
768 
769 	return ret;
770 }
771 
772 static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
773 {
774 	struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
775 	int ret;
776 
777 	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
778 
779 	ret = iwl_mvm_mac_setup_register(mvm);
780 	if (ret)
781 		return ret;
782 
783 	mvm->hw_registered = true;
784 
785 	iwl_mvm_dbgfs_register(mvm);
786 
787 	wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
788 					 mvm->mei_rfkill_blocked,
789 					 RFKILL_HARD_BLOCK_NOT_OWNER);
790 
791 	iwl_mvm_mei_set_sw_rfkill_state(mvm);
792 
793 	return 0;
794 }
795 
796 struct iwl_mvm_frob_txf_data {
797 	u8 *buf;
798 	size_t buflen;
799 };
800 
801 static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw,
802 				      struct ieee80211_vif *vif,
803 				      struct ieee80211_sta *sta,
804 				      struct ieee80211_key_conf *key,
805 				      void *data)
806 {
807 	struct iwl_mvm_frob_txf_data *txf = data;
808 	u8 keylen, match, matchend;
809 	u8 *keydata;
810 	size_t i;
811 
812 	switch (key->cipher) {
813 	case WLAN_CIPHER_SUITE_CCMP:
814 		keydata = key->key;
815 		keylen = key->keylen;
816 		break;
817 	case WLAN_CIPHER_SUITE_WEP40:
818 	case WLAN_CIPHER_SUITE_WEP104:
819 	case WLAN_CIPHER_SUITE_TKIP:
820 		/*
821 		 * WEP has short keys which might show up in the payload,
822 		 * and then you can deduce the key, so in this case just
823 		 * remove all FIFO data.
824 		 * For TKIP, we don't know the phase 2 keys here, so same.
825 		 */
826 		memset(txf->buf, 0xBB, txf->buflen);
827 		return;
828 	default:
829 		return;
830 	}
831 
832 	/* scan for key material and clear it out */
833 	match = 0;
834 	for (i = 0; i < txf->buflen; i++) {
835 		if (txf->buf[i] != keydata[match]) {
836 			match = 0;
837 			continue;
838 		}
839 		match++;
840 		if (match == keylen) {
841 			memset(txf->buf + i - keylen, 0xAA, keylen);
842 			match = 0;
843 		}
844 	}
845 
846 	/* we're dealing with a FIFO, so check wrapped around data */
847 	matchend = match;
848 	for (i = 0; match && i < keylen - match; i++) {
849 		if (txf->buf[i] != keydata[match])
850 			break;
851 		match++;
852 		if (match == keylen) {
853 			memset(txf->buf, 0xAA, i + 1);
854 			memset(txf->buf + txf->buflen - matchend, 0xAA,
855 			       matchend);
856 			break;
857 		}
858 	}
859 }
860 
861 static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen)
862 {
863 	struct iwl_mvm_frob_txf_data txf = {
864 		.buf = buf,
865 		.buflen = buflen,
866 	};
867 	struct iwl_mvm *mvm = ctx;
868 
869 	/* embedded key material exists only on old API */
870 	if (iwl_mvm_has_new_tx_api(mvm))
871 		return;
872 
873 	rcu_read_lock();
874 	ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
875 	rcu_read_unlock();
876 }
877 
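/* Blank out key material from host commands captured for firmware dumps */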
878 static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len)
879 {
880 	/* we only use wide headers for commands */
881 	struct iwl_cmd_header_wide *hdr = hcmd;
882 	unsigned int frob_start = sizeof(*hdr), frob_end = 0;
883 
884 	if (len < sizeof(*hdr))
885 		return;
886 
887 	/* all the commands we care about are in LONG_GROUP */
888 	if (hdr->group_id != LONG_GROUP)
889 		return;
890 
891 	switch (hdr->cmd) {
892 	case WEP_KEY:
893 	case WOWLAN_TKIP_PARAM:
894 	case WOWLAN_KEK_KCK_MATERIAL:
895 	case ADD_STA_KEY:
896 		/*
897 		 * blank out everything here, easier than dealing
898 		 * with the various versions of the command
899 		 */
900 		frob_end = INT_MAX;
901 		break;
902 	case MGMT_MCAST_KEY:
903 		frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
904 		BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) !=
905 			     offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
906 
907 		frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
908 		BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) <
909 			     offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
910 		break;
911 	}
912 
913 	if (frob_start >= frob_end)
914 		return;
915 
916 	if (frob_end > len)
917 		frob_end = len;
918 
919 	memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start);
920 }
921 
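/*
 * Overwrite any part of a dumped memory chunk that overlaps one of the
 * firmware's dump exclusion (dump_excl) regions for the running image.
 */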
922 static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
923 {
924 	const struct iwl_dump_exclude *excl;
925 	struct iwl_mvm *mvm = ctx;
926 	int i;
927 
928 	switch (mvm->fwrt.cur_fw_img) {
929 	case IWL_UCODE_INIT:
930 	default:
931 		/* not relevant */
932 		return;
933 	case IWL_UCODE_REGULAR:
934 	case IWL_UCODE_REGULAR_USNIFFER:
935 		excl = mvm->fw->dump_excl;
936 		break;
937 	case IWL_UCODE_WOWLAN:
938 		excl = mvm->fw->dump_excl_wowlan;
939 		break;
940 	}
941 
942 	BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
943 		     sizeof(mvm->fw->dump_excl_wowlan));
944 
945 	for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
946 		u32 start, end;
947 
948 		if (!excl[i].addr || !excl[i].size)
949 			continue;
950 
951 		start = excl[i].addr;
952 		end = start + excl[i].size;
953 
954 		if (end <= mem_addr || start >= mem_addr + buflen)
955 			continue;
956 
957 		if (start < mem_addr)
958 			start = mem_addr;
959 
960 		if (end > mem_addr + buflen)
961 			end = mem_addr + buflen;
962 
963 		memset((u8 *)mem + start - mem_addr, 0xAA, end - start);
964 	}
965 }
966 
967 static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
968 	.frob_txf = iwl_mvm_frob_txf,
969 	.frob_hcmd = iwl_mvm_frob_hcmd,
970 	.frob_mem = iwl_mvm_frob_mem,
971 };
972 
973 static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
974 {
975 	struct iwl_mvm *mvm = priv;
976 	struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;
977 
978 	/*
979 	 * This is protected by the guarantee that this function will not be
980 	 * called concurrently from two different threads.
981 	 */
982 	prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
983 
984 	curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
985 	if (!curr_conn_info)
986 		return;
987 
988 	curr_conn_info->conn_info = *conn_info;
989 
990 	rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
991 
992 	if (prev_conn_info)
993 		kfree_rcu(prev_conn_info, rcu_head);
994 }
995 
996 static void iwl_mvm_mei_rfkill(void *priv, bool blocked)
997 {
998 	struct iwl_mvm *mvm = priv;
999 
1000 	mvm->mei_rfkill_blocked = blocked;
1001 	if (!mvm->hw_registered)
1002 		return;
1003 
1004 	wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
1005 					 mvm->mei_rfkill_blocked,
1006 					 RFKILL_HARD_BLOCK_NOT_OWNER);
1007 }
1008 
1009 static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
1010 {
1011 	struct iwl_mvm *mvm = priv;
1012 
1013 	if (!mvm->hw_registered || !mvm->csme_vif)
1014 		return;
1015 
1016 	iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
1017 }
1018 
1019 static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
1020 {
1021 	struct iwl_mvm *mvm =
1022 		container_of(wk, struct iwl_mvm, sap_connected_wk);
1023 	int ret;
1024 
1025 	ret = iwl_mvm_start_get_nvm(mvm);
1026 	if (ret)
1027 		goto out_free;
1028 
1029 	ret = iwl_mvm_start_post_nvm(mvm);
1030 	if (ret)
1031 		goto out_free;
1032 
1033 	return;
1034 
1035 out_free:
1036 	IWL_ERR(mvm, "Couldn't get started...\n");
1037 	iwl_mei_start_unregister();
1038 	iwl_mei_unregister_complete();
1039 	iwl_fw_flush_dumps(&mvm->fwrt);
1040 	iwl_mvm_thermal_exit(mvm);
1041 	iwl_fw_runtime_free(&mvm->fwrt);
1042 	iwl_phy_db_free(mvm->phy_db);
1043 	kfree(mvm->scan_cmd);
1044 	iwl_trans_op_mode_leave(mvm->trans);
1045 	kfree(mvm->nvm_data);
1046 	kfree(mvm->mei_nvm_data);
1047 
1048 	ieee80211_free_hw(mvm->hw);
1049 }
1050 
1051 static void iwl_mvm_mei_sap_connected(void *priv)
1052 {
1053 	struct iwl_mvm *mvm = priv;
1054 
1055 	if (!mvm->hw_registered)
1056 		schedule_work(&mvm->sap_connected_wk);
1057 }
1058 
1059 static void iwl_mvm_mei_nic_stolen(void *priv)
1060 {
1061 	struct iwl_mvm *mvm = priv;
1062 
1063 	rtnl_lock();
1064 	cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
1065 	rtnl_unlock();
1066 }
1067 
1068 static const struct iwl_mei_ops mei_ops = {
1069 	.me_conn_status = iwl_mvm_me_conn_status,
1070 	.rfkill = iwl_mvm_mei_rfkill,
1071 	.roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
1072 	.sap_connected = iwl_mvm_mei_sap_connected,
1073 	.nic_stolen = iwl_mvm_mei_nic_stolen,
1074 };
1075 
1076 static struct iwl_op_mode *
1077 iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
1078 		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
1079 {
1080 	struct ieee80211_hw *hw;
1081 	struct iwl_op_mode *op_mode;
1082 	struct iwl_mvm *mvm;
1083 	struct iwl_trans_config trans_cfg = {};
1084 	static const u8 no_reclaim_cmds[] = {
1085 		TX_CMD,
1086 	};
1087 	int scan_size;
1088 	u32 min_backoff;
1089 	struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
1090 
1091 	/*
1092 	 * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
1093 	 * index all over the driver - check that its value corresponds to the
1094 	 * array size.
1095 	 */
1096 	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
1097 		     IWL_MVM_STATION_COUNT_MAX);
1098 
1099 	/********************************
1100 	 * 1. Allocating and configuring HW data
1101 	 ********************************/
1102 	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
1103 				sizeof(struct iwl_mvm),
1104 				&iwl_mvm_hw_ops);
1105 	if (!hw)
1106 		return NULL;
1107 
1108 	hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
1109 
1110 	if (cfg->max_tx_agg_size)
1111 		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
1112 	else
1113 		hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
1114 
1115 	op_mode = hw->priv;
1116 
1117 	mvm = IWL_OP_MODE_GET_MVM(op_mode);
1118 	mvm->dev = trans->dev;
1119 	mvm->trans = trans;
1120 	mvm->cfg = cfg;
1121 	mvm->fw = fw;
1122 	mvm->hw = hw;
1123 
1124 	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
1125 			    &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
1126 
1127 	iwl_mvm_get_acpi_tables(mvm);
1128 	iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
1129 
1130 	mvm->init_status = 0;
1131 
1132 	if (iwl_mvm_has_new_rx_api(mvm)) {
1133 		op_mode->ops = &iwl_mvm_ops_mq;
1134 		trans->rx_mpdu_cmd_hdr_size =
1135 			(trans->trans_cfg->device_family >=
1136 			 IWL_DEVICE_FAMILY_AX210) ?
1137 			sizeof(struct iwl_rx_mpdu_desc) :
1138 			IWL_RX_DESC_SIZE_V1;
1139 	} else {
1140 		op_mode->ops = &iwl_mvm_ops;
1141 		trans->rx_mpdu_cmd_hdr_size =
1142 			sizeof(struct iwl_rx_mpdu_res_start);
1143 
1144 		if (WARN_ON(trans->num_rx_queues > 1))
1145 			goto out_free;
1146 	}
1147 
1148 	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
1149 
1150 	if (iwl_mvm_has_new_tx_api(mvm)) {
1151 		/*
1152 		 * If we have the new TX/queue allocation API initialize them
1153 		 * all to invalid numbers. We'll rewrite the ones that we need
1154 		 * later, but that doesn't happen for all of them all of the
1155 		 * time (e.g. P2P Device is optional), and if a dynamic queue
1156 		 * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
1157 		 * iwl_mvm_is_static_queue() erroneously returns true, and we
1158 		 * might have things getting stuck.
1159 		 */
1160 		mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
1161 		mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
1162 		mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
1163 		mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
1164 	} else {
1165 		mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
1166 		mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
1167 		mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1168 		mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
1169 	}
1170 
1171 	mvm->sf_state = SF_UNINIT;
1172 	if (iwl_mvm_has_unified_ucode(mvm))
1173 		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
1174 	else
1175 		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
1176 	mvm->drop_bcn_ap_mode = true;
1177 
1178 	mutex_init(&mvm->mutex);
1179 	spin_lock_init(&mvm->async_handlers_lock);
1180 	INIT_LIST_HEAD(&mvm->time_event_list);
1181 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
1182 	INIT_LIST_HEAD(&mvm->async_handlers_list);
1183 	spin_lock_init(&mvm->time_event_lock);
1184 	INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
1185 	INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
1186 	INIT_LIST_HEAD(&mvm->resp_pasn_list);
1187 
1188 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
1189 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
1190 	INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
1191 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
1192 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
1193 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
1194 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
1195 
1196 	init_waitqueue_head(&mvm->rx_sync_waitq);
1197 
1198 	mvm->queue_sync_state = 0;
1199 
1200 	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
1201 
1202 	spin_lock_init(&mvm->tcm.lock);
1203 	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
1204 	mvm->tcm.ts = jiffies;
1205 	mvm->tcm.ll_ts = jiffies;
1206 	mvm->tcm.uapsd_nonagg_ts = jiffies;
1207 
1208 	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
1209 
1210 	mvm->cmd_ver.d0i3_resp =
1211 		iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
1212 					0);
1213 	/* we only support version 1 */
1214 	if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
1215 		goto out_free;
1216 
1217 	mvm->cmd_ver.range_resp =
1218 		iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
1219 					TOF_RANGE_RESPONSE_NOTIF, 5);
1220 	/* we only support up to version 9 */
1221 	if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
1222 		goto out_free;
1223 
1224 	/*
1225 	 * Populate the state variables that the transport layer needs
1226 	 * to know about.
1227 	 */
1228 	trans_cfg.op_mode = op_mode;
1229 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
1230 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
1231 
1232 	switch (iwlwifi_mod_params.amsdu_size) {
1233 	case IWL_AMSDU_DEF:
1234 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1235 		break;
1236 	case IWL_AMSDU_4K:
1237 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1238 		break;
1239 	case IWL_AMSDU_8K:
1240 		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
1241 		break;
1242 	case IWL_AMSDU_12K:
1243 		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
1244 		break;
1245 	default:
1246 		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
1247 		       iwlwifi_mod_params.amsdu_size);
1248 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1249 	}
1250 
1251 	trans->wide_cmd_header = true;
1252 	trans_cfg.bc_table_dword =
1253 		mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
1254 
1255 	trans_cfg.command_groups = iwl_mvm_groups;
1256 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
1257 
1258 	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
1259 	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
1260 	trans_cfg.scd_set_active = true;
1261 
1262 	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
1263 					  driver_data[2]);
1264 
1265 	/* Set a short watchdog for the command queue */
1266 	trans_cfg.cmd_q_wdg_timeout =
1267 		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
1268 
1269 	snprintf(mvm->hw->wiphy->fw_version,
1270 		 sizeof(mvm->hw->wiphy->fw_version),
1271 		 "%s", fw->fw_version);
1272 
1273 	trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
1274 						   IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
1275 
1276 	trans_cfg.queue_alloc_cmd_ver =
1277 		iwl_fw_lookup_cmd_ver(mvm->fw,
1278 				      WIDE_ID(DATA_PATH_GROUP,
1279 					      SCD_QUEUE_CONFIG_CMD),
1280 				      0);
1281 	mvm->sta_remove_requires_queue_remove =
1282 		trans_cfg.queue_alloc_cmd_ver > 0;
1283 
1284 	/* Configure transport layer */
1285 	iwl_trans_configure(mvm->trans, &trans_cfg);
1286 
1287 	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
1288 	trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
1289 	trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
1290 	memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
1291 	       sizeof(trans->dbg.conf_tlv));
1292 	trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
1293 
1294 	trans->iml = mvm->fw->iml;
1295 	trans->iml_len = mvm->fw->iml_len;
1296 
1297 	/* set up notification wait support */
1298 	iwl_notification_wait_init(&mvm->notif_wait);
1299 
1300 	/* Init phy db */
1301 	mvm->phy_db = iwl_phy_db_init(trans);
1302 	if (!mvm->phy_db) {
1303 		IWL_ERR(mvm, "Cannot init phy_db\n");
1304 		goto out_free;
1305 	}
1306 
1307 	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
1308 		 mvm->trans->name, mvm->trans->hw_rev);
1309 
1310 	if (iwlwifi_mod_params.nvm_file)
1311 		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
1312 	else
1313 		IWL_DEBUG_EEPROM(mvm->trans->dev,
1314 				 "working without external nvm file\n");
1315 
1316 	scan_size = iwl_mvm_scan_size(mvm);
1317 
1318 	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
1319 	if (!mvm->scan_cmd)
1320 		goto out_free;
1321 
1322 	/* invalidate ids to prevent accidental removal of sta_id 0 */
1323 	mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
1324 	mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
1325 
1326 	/* Set EBS as successful as long as not stated otherwise by the FW. */
1327 	mvm->last_ebs_successful = true;
1328 
1329 	min_backoff = iwl_mvm_min_backoff(mvm);
1330 	iwl_mvm_thermal_initialize(mvm, min_backoff);
1331 
1332 	if (!iwl_mvm_has_new_rx_stats_api(mvm))
1333 		memset(&mvm->rx_stats_v3, 0,
1334 		       sizeof(struct mvm_statistics_rx_v3));
1335 	else
1336 		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
1337 
1338 	mvm->debugfs_dir = dbgfs_dir;
1339 
1340 	mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
1341 
1342 	if (iwl_mvm_start_get_nvm(mvm)) {
1343 		/*
1344 		 * Getting the NVM failed while CSME is the owner, but we are
1345 		 * registered to MEI; we'll get the NVM later, when it becomes
1346 		 * possible to get it from CSME.
1347 		 */
1348 		if (trans->csme_own && mvm->mei_registered)
1349 			return op_mode;
1350 
1351 		goto out_thermal_exit;
1352 	}
1353 
1354 
1355 	if (iwl_mvm_start_post_nvm(mvm))
1356 		goto out_thermal_exit;
1357 
1358 	return op_mode;
1359 
1360  out_thermal_exit:
1361 	iwl_mvm_thermal_exit(mvm);
1362 	if (mvm->mei_registered) {
1363 		iwl_mei_start_unregister();
1364 		iwl_mei_unregister_complete();
1365 	}
1366  out_free:
1367 	iwl_fw_flush_dumps(&mvm->fwrt);
1368 	iwl_fw_runtime_free(&mvm->fwrt);
1369 
1370 	if (iwlmvm_mod_params.init_dbg)
1371 		return op_mode;
1372 	iwl_phy_db_free(mvm->phy_db);
1373 	kfree(mvm->scan_cmd);
1374 	iwl_trans_op_mode_leave(trans);
1375 
1376 	ieee80211_free_hw(mvm->hw);
1377 	return NULL;
1378 }
1379 
1380 void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1381 {
1382 	lockdep_assert_held(&mvm->mutex);
1383 
1384 	iwl_fw_cancel_timestamp(&mvm->fwrt);
1385 
1386 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1387 
1388 	iwl_fw_dbg_stop_sync(&mvm->fwrt);
1389 	iwl_trans_stop_device(mvm->trans);
1390 	iwl_free_fw_paging(&mvm->fwrt);
1391 	iwl_fw_dump_conf_clear(&mvm->fwrt);
1392 	iwl_mvm_mei_device_down(mvm);
1393 }
1394 
1395 static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
1396 {
1397 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1398 	int i;
1399 
1400 	if (mvm->mei_registered) {
1401 		rtnl_lock();
1402 		iwl_mei_set_netdev(NULL);
1403 		rtnl_unlock();
1404 		iwl_mei_start_unregister();
1405 	}
1406 
1407 	/*
1408 	 * After we unregister from mei, the worker can't be scheduled
1409 	 * anymore.
1410 	 */
1411 	cancel_work_sync(&mvm->sap_connected_wk);
1412 
1413 	iwl_mvm_leds_exit(mvm);
1414 
1415 	iwl_mvm_thermal_exit(mvm);
1416 
1417 	/*
1418 	 * If we couldn't get ownership of the device and we couldn't
1419 	 * get the NVM from CSME, we haven't registered to mac80211.
1420 	 * In that case, we didn't fail op_mode_start, because we are
1421 	 * still waiting for CSME to allow us to get the NVM and then
1422 	 * register. If that never happened, we are still not registered
1423 	 * to mac80211, hence the if below.
1424 	 */
1425 	if (mvm->hw_registered)
1426 		ieee80211_unregister_hw(mvm->hw);
1427 
1428 	kfree(mvm->scan_cmd);
1429 	kfree(mvm->mcast_filter_cmd);
1430 	mvm->mcast_filter_cmd = NULL;
1431 
1432 	kfree(mvm->error_recovery_buf);
1433 	mvm->error_recovery_buf = NULL;
1434 
1435 	iwl_trans_op_mode_leave(mvm->trans);
1436 
1437 	iwl_phy_db_free(mvm->phy_db);
1438 	mvm->phy_db = NULL;
1439 
1440 	kfree(mvm->nvm_data);
1441 	kfree(mvm->mei_nvm_data);
1442 	kfree(rcu_access_pointer(mvm->csme_conn_info));
1443 	kfree(mvm->temp_nvm_data);
1444 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
1445 		kfree(mvm->nvm_sections[i].data);
1446 
1447 	cancel_delayed_work_sync(&mvm->tcm.work);
1448 
1449 	iwl_fw_runtime_free(&mvm->fwrt);
1450 	mutex_destroy(&mvm->mutex);
1451 
1452 	if (mvm->mei_registered)
1453 		iwl_mei_unregister_complete();
1454 
1455 	ieee80211_free_hw(mvm->hw);
1456 }
1457 
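/*
 * A notification deferred from the Rx path: the RXB page is stolen and
 * the entry is queued on async_handlers_list to be handled from
 * iwl_mvm_async_handlers_wk().
 */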
1458 struct iwl_async_handler_entry {
1459 	struct list_head list;
1460 	struct iwl_rx_cmd_buffer rxb;
1461 	enum iwl_rx_handler_context context;
1462 	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1463 };
1464 
1465 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
1466 {
1467 	struct iwl_async_handler_entry *entry, *tmp;
1468 
1469 	spin_lock_bh(&mvm->async_handlers_lock);
1470 	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
1471 		iwl_free_rxb(&entry->rxb);
1472 		list_del(&entry->list);
1473 		kfree(entry);
1474 	}
1475 	spin_unlock_bh(&mvm->async_handlers_lock);
1476 }
1477 
1478 static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
1479 {
1480 	struct iwl_mvm *mvm =
1481 		container_of(wk, struct iwl_mvm, async_handlers_wk);
1482 	struct iwl_async_handler_entry *entry, *tmp;
1483 	LIST_HEAD(local_list);
1484 
1485 	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
1486 
1487 	/*
1488 	 * Sync with the Rx path using the lock: remove all the entries from
1489 	 * this list, move them to a local one (lock free), and then handle them.
1490 	 */
1491 	spin_lock_bh(&mvm->async_handlers_lock);
1492 	list_splice_init(&mvm->async_handlers_list, &local_list);
1493 	spin_unlock_bh(&mvm->async_handlers_lock);
1494 
1495 	list_for_each_entry_safe(entry, tmp, &local_list, list) {
1496 		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
1497 			mutex_lock(&mvm->mutex);
1498 		entry->fn(mvm, &entry->rxb);
1499 		iwl_free_rxb(&entry->rxb);
1500 		list_del(&entry->list);
1501 		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
1502 			mutex_unlock(&mvm->mutex);
1503 		kfree(entry);
1504 	}
1505 }
1506 
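/*
 * Collect firmware debug data if this notification matches one of the
 * command ids listed in an active FW_DBG_TRIGGER_FW_NOTIF trigger.
 */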
1507 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
1508 					    struct iwl_rx_packet *pkt)
1509 {
1510 	struct iwl_fw_dbg_trigger_tlv *trig;
1511 	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
1512 	int i;
1513 
1514 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1515 				     FW_DBG_TRIGGER_FW_NOTIF);
1516 	if (!trig)
1517 		return;
1518 
1519 	cmds_trig = (void *)trig->data;
1520 
1521 	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
1522 		/* don't collect on CMD 0 */
1523 		if (!cmds_trig->cmds[i].cmd_id)
1524 			break;
1525 
1526 		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
1527 		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
1528 			continue;
1529 
1530 		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1531 					"CMD 0x%02x.%02x received",
1532 					pkt->hdr.group_id, pkt->hdr.cmd);
1533 		break;
1534 	}
1535 }
1536 
1537 static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
1538 			      struct iwl_rx_cmd_buffer *rxb,
1539 			      struct iwl_rx_packet *pkt)
1540 {
1541 	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1542 	int i;
1543 	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
1544 
1545 	iwl_dbg_tlv_time_point(&mvm->fwrt,
1546 			       IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
1547 	iwl_mvm_rx_check_trigger(mvm, pkt);
1548 
1549 	/*
1550 	 * Do the notification wait before RX handlers so
1551 	 * even if the RX handler consumes the RXB we have
1552 	 * access to it in the notification wait entry.
1553 	 */
1554 	iwl_notification_wait_notify(&mvm->notif_wait, pkt);
1555 
1556 	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
1557 		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
1558 		struct iwl_async_handler_entry *entry;
1559 
1560 		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
1561 			continue;
1562 
1563 		if (unlikely(pkt_len < rx_h->min_size))
1564 			return;
1565 
1566 		if (rx_h->context == RX_HANDLER_SYNC) {
1567 			rx_h->fn(mvm, rxb);
1568 			return;
1569 		}
1570 
1571 		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1572 		/* we can't do much... */
1573 		if (!entry)
1574 			return;
1575 
1576 		entry->rxb._page = rxb_steal_page(rxb);
1577 		entry->rxb._offset = rxb->_offset;
1578 		entry->rxb._rx_page_order = rxb->_rx_page_order;
1579 		entry->fn = rx_h->fn;
1580 		entry->context = rx_h->context;
1581 		spin_lock(&mvm->async_handlers_lock);
1582 		list_add_tail(&entry->list, &mvm->async_handlers_list);
1583 		spin_unlock(&mvm->async_handlers_lock);
1584 		schedule_work(&mvm->async_handlers_wk);
1585 		break;
1586 	}
1587 }
1588 
1589 static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
1590 		       struct napi_struct *napi,
1591 		       struct iwl_rx_cmd_buffer *rxb)
1592 {
1593 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1594 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1595 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1596 
1597 	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1598 		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
1599 	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
1600 		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
1601 	else
1602 		iwl_mvm_rx_common(mvm, rxb, pkt);
1603 }
1604 
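/* Rx dispatch for multi-queue devices; this entry point handles queue 0 */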
1605 void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
1606 		   struct napi_struct *napi,
1607 		   struct iwl_rx_cmd_buffer *rxb)
1608 {
1609 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1610 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1611 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1612 
1613 	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1614 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1615 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1616 					 RX_QUEUES_NOTIFICATION)))
1617 		iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
1618 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
1619 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1620 	else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
1621 		iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
1622 	else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
1623 		iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
1624 	else
1625 		iwl_mvm_rx_common(mvm, rxb, pkt);
1626 }
1627 
1628 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
1629 			     const struct iwl_device_cmd *cmd)
1630 {
1631 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1632 
1633 	/*
1634 	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
1635 	 * commands that need to block the Tx queues.
1636 	 */
1637 	iwl_trans_block_txq_ptrs(mvm->trans, false);
1638 }
1639 
1640 static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
1641 {
1642 	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
1643 		queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
1644 }
1645 
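/*
 * Propagate a HW TX queue stop/wake to mac80211: for the static queues
 * (aux/snif/probe/P2P device) stop or wake all queues, otherwise stop
 * or restart only the mac80211 TXQs of the station/TIDs mapped to this
 * hardware queue.
 */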
1646 static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
1647 				       int hw_queue, bool start)
1648 {
1649 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1650 	struct ieee80211_sta *sta;
1651 	struct ieee80211_txq *txq;
1652 	struct iwl_mvm_txq *mvmtxq;
1653 	int i;
1654 	unsigned long tid_bitmap;
1655 	struct iwl_mvm_sta *mvmsta;
1656 	u8 sta_id;
1657 
1658 	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
1659 		mvm->tvqm_info[hw_queue].sta_id :
1660 		mvm->queue_info[hw_queue].ra_sta_id;
1661 
1662 	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
1663 		return;
1664 
1665 	rcu_read_lock();
1666 
1667 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1668 	if (IS_ERR_OR_NULL(sta))
1669 		goto out;
1670 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1671 
1672 	if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
1673 		if (!start)
1674 			ieee80211_stop_queues(mvm->hw);
1675 		else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
1676 			ieee80211_wake_queues(mvm->hw);
1677 
1678 		goto out;
1679 	}
1680 
1681 	if (iwl_mvm_has_new_tx_api(mvm)) {
1682 		int tid = mvm->tvqm_info[hw_queue].txq_tid;
1683 
1684 		tid_bitmap = BIT(tid);
1685 	} else {
1686 		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
1687 	}
1688 
1689 	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1690 		int tid = i;
1691 
1692 		if (tid == IWL_MAX_TID_COUNT)
1693 			tid = IEEE80211_NUM_TIDS;
1694 
1695 		txq = sta->txq[tid];
1696 		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
1697 		mvmtxq->stopped = !start;
1698 
1699 		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
1700 			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1701 	}
1702 
1703 out:
1704 	rcu_read_unlock();
1705 }
1706 
1707 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1708 {
1709 	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
1710 }
1711 
1712 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1713 {
1714 	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
1715 }
1716 
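/*
 * Report the current radio-kill state to mac80211 and wake up anyone
 * waiting for RX queue synchronization, since no RX is expected while
 * the radio is killed.
 */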
1717 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
1718 {
1719 	bool state = iwl_mvm_is_radio_killed(mvm);
1720 
1721 	if (state)
1722 		wake_up(&mvm->rx_sync_waitq);
1723 
1724 	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
1725 }
1726 
1727 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1728 {
1729 	if (state)
1730 		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1731 	else
1732 		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1733 
1734 	iwl_mvm_set_rfkill_state(mvm);
1735 }
1736 
1737 struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
1738 {
1739 	return rcu_dereference_protected(mvm->csme_conn_info,
1740 					 lockdep_is_held(&mvm->mutex));
1741 }
1742 
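/*
 * hw_rf_kill op_mode callback; the return value tells the transport
 * whether to stop the device: never for unified firmware images (the
 * device is stopped later, when cfg80211 takes us down), otherwise
 * only when RF-kill is asserted after the rfkill-safe part of init.
 */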
1743 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
1744 {
1745 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1746 	bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
1747 	bool unified = iwl_mvm_has_unified_ucode(mvm);
1748 
1749 	if (state)
1750 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1751 	else
1752 		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1753 
1754 	iwl_mvm_set_rfkill_state(mvm);
1755 
1756 	/* iwl_run_init_mvm_ucode is waiting for results, abort it. */
1757 	if (rfkill_safe_init_done)
1758 		iwl_abort_notification_waits(&mvm->notif_wait);
1759 
1760 	/*
1761 	 * Don't ask the transport to stop the firmware. We'll do it
1762 	 * after cfg80211 takes us down.
1763 	 */
1764 	if (unified)
1765 		return false;
1766 
1767 	/*
1768 	 * Stop the device if we run OPERATIONAL firmware or if we are in the
1769 	 * middle of the calibrations.
1770 	 */
1771 	return state && rfkill_safe_init_done;
1772 }
1773 
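/*
 * free_skb op_mode callback: release the TX command attached to the
 * frame (kept in info->driver_data[1] by the TX path) and return the
 * skb to mac80211.
 */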
1774 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1775 {
1776 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1777 	struct ieee80211_tx_info *info;
1778 
1779 	info = IEEE80211_SKB_CB(skb);
1780 	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1781 	ieee80211_free_txskb(mvm->hw, skb);
1782 }
1783 
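/*
 * When the firmware dies while mac80211 is already restarting us we
 * can't recover with yet another restart, so the device is re-probed
 * from a work item instead; this struct carries the device pointer to
 * that work.
 */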
1784 struct iwl_mvm_reprobe {
1785 	struct device *dev;
1786 	struct work_struct work;
1787 };
1788 
1789 static void iwl_mvm_reprobe_wk(struct work_struct *wk)
1790 {
1791 	struct iwl_mvm_reprobe *reprobe;
1792 
1793 	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
1794 	if (device_reprobe(reprobe->dev))
1795 		dev_err(reprobe->dev, "reprobe failed!\n");
1796 	put_device(reprobe->dev);
1797 	kfree(reprobe);
1798 	module_put(THIS_MODULE);
1799 }
1800 
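/*
 * Decide how to recover from a firmware error: abort pending waiters,
 * collect debug data and then, depending on the current state, either
 * do nothing (restart not allowed or already pending), schedule a
 * device re-probe (error during reconfiguration), or ask mac80211 to
 * restart the hardware.
 */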
1801 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
1802 {
1803 	iwl_abort_notification_waits(&mvm->notif_wait);
1804 	iwl_dbg_tlv_del_timers(mvm->trans);
1805 
1806 	/*
1807 	 * This is a bit racy, but in the worst case we tell mac80211 about
1808 	 * a stopped/aborted scan when that was already done, which
1809 	 * is not a problem. It is necessary to abort any OS scan
1810 	 * here because mac80211 requires the scan to be cleared
1811 	 * before restarting.
1812 	 * We'll reset the scan_status to NONE in restart cleanup in
1813 	 * the next start() call from mac80211. If restart isn't called
1814 	 * (no fw restart), the scan status will stay busy.
1815 	 */
1816 	iwl_mvm_report_scan_aborted(mvm);
1817 
1818 	/*
1819 	 * If we're restarting already, don't cycle restarts.
1820 	 * If INIT fw asserted, it will likely fail again.
1821 	 * If WoWLAN fw asserted, don't restart either; mac80211
1822 	 * can't recover from this since we're already half suspended.
1823 	 */
1824 	if (!mvm->fw_restart && fw_error) {
1825 		iwl_fw_error_collect(&mvm->fwrt, false);
1826 	} else if (test_bit(IWL_MVM_STATUS_STARTING,
1827 			    &mvm->status)) {
1828 		IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
1829 	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1830 		struct iwl_mvm_reprobe *reprobe;
1831 
1832 		IWL_ERR(mvm,
1833 			"Firmware error during reconfiguration - reprobe!\n");
1834 
1835 		/*
1836 		 * Get a module reference to avoid doing this while unloading
1837 		 * anyway, and to avoid scheduling a work item with code that's
1838 		 * being removed.
1839 		 */
1840 		if (!try_module_get(THIS_MODULE)) {
1841 			IWL_ERR(mvm, "Module is being unloaded - abort\n");
1842 			return;
1843 		}
1844 
1845 		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
1846 		if (!reprobe) {
1847 			module_put(THIS_MODULE);
1848 			return;
1849 		}
1850 		reprobe->dev = get_device(mvm->trans->dev);
1851 		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
1852 		schedule_work(&reprobe->work);
1853 	} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
1854 			    &mvm->status)) {
1855 		IWL_ERR(mvm, "HW restart already requested, but not started\n");
1856 	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
1857 		   mvm->hw_registered &&
1858 		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1859 		/* This should be the first thing done before trying to collect
1860 		 * any data, to avoid endless loops if a HW error happens while
1861 		 * collecting debug data.
1862 		 */
1863 		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1864 
1865 		if (mvm->fw->ucode_capa.error_log_size) {
1866 			u32 src_size = mvm->fw->ucode_capa.error_log_size;
1867 			u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
1868 			u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);
1869 
1870 			if (recover_buf) {
1871 				mvm->error_recovery_buf = recover_buf;
1872 				iwl_trans_read_mem_bytes(mvm->trans,
1873 							 src_addr,
1874 							 recover_buf,
1875 							 src_size);
1876 			}
1877 		}
1878 
1879 		iwl_fw_error_collect(&mvm->fwrt, false);
1880 
1881 		if (fw_error && mvm->fw_restart > 0) {
1882 			mvm->fw_restart--;
1883 			ieee80211_restart_hw(mvm->hw);
1884 		} else if (mvm->fwrt.trans->dbg.restart_required) {
1885 			IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
1886 			mvm->fwrt.trans->dbg.restart_required = false;
1887 			ieee80211_restart_hw(mvm->hw);
1888 		} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
1889 			ieee80211_restart_hw(mvm->hw);
1890 		}
1891 	}
1892 }
1893 
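/*
 * nic_error op_mode callback: dump the firmware error log (unless
 * suppressed or the transport is already dead), collect debug data
 * synchronously when requested (shutdown), and otherwise attempt a
 * restart if the firmware is still considered running.
 */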
1894 static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
1895 {
1896 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1897 
1898 	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
1899 	    !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
1900 				&mvm->status))
1901 		iwl_mvm_dump_nic_error_log(mvm);
1902 
1903 	if (sync) {
1904 		iwl_fw_error_collect(&mvm->fwrt, true);
1905 		/*
1906 		 * Currently, the only case for sync=true is during
1907 		 * shutdown, so just stop in this case. If/when that
1908 		 * changes, we need to be a bit smarter here.
1909 		 */
1910 		return;
1911 	}
1912 
1913 	/*
1914 	 * If the firmware crashes while we're already considering it
1915 	 * to be dead, then don't ask for a restart; that cannot do
1916 	 * anything useful anyway.
1917 	 */
1918 	if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
1919 		return;
1920 
1921 	iwl_mvm_nic_restart(mvm, false);
1922 }
1923 
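/*
 * A full command queue means the firmware is no longer processing
 * host commands; warn and treat it as a firmware error so the device
 * gets restarted.
 */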
1924 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
1925 {
1926 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1927 
1928 	WARN_ON(1);
1929 	iwl_mvm_nic_restart(mvm, true);
1930 }
1931 
1932 static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
1933 				       enum iwl_fw_ini_time_point tp_id,
1934 				       union iwl_dbg_tlv_tp_data *tp_data)
1935 {
1936 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1937 
1938 	iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
1939 }
1940 
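/*
 * Ops shared between the legacy (single RX queue) and multi-queue
 * op_mode implementations below; only the RX handlers differ.
 */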
1941 #define IWL_MVM_COMMON_OPS					\
1942 	/* these could be differentiated */			\
1943 	.async_cb = iwl_mvm_async_cb,				\
1944 	.queue_full = iwl_mvm_stop_sw_queue,			\
1945 	.queue_not_full = iwl_mvm_wake_sw_queue,		\
1946 	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
1947 	.free_skb = iwl_mvm_free_skb,				\
1948 	.nic_error = iwl_mvm_nic_error,				\
1949 	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
1950 	.nic_config = iwl_mvm_nic_config,			\
1951 	/* as we only register one, these MUST be common! */	\
1952 	.start = iwl_op_mode_mvm_start,				\
1953 	.stop = iwl_op_mode_mvm_stop,				\
1954 	.time_point = iwl_op_mode_mvm_time_point
1955 
1956 static const struct iwl_op_mode_ops iwl_mvm_ops = {
1957 	IWL_MVM_COMMON_OPS,
1958 	.rx = iwl_mvm_rx,
1959 };
1960 
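/*
 * RX dispatch for the RSS queues: only MPDUs, frame release and RX
 * queue sync notifications are handled here; anything else, or an
 * out-of-range queue index, is ignored.
 */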
1961 static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1962 			      struct napi_struct *napi,
1963 			      struct iwl_rx_cmd_buffer *rxb,
1964 			      unsigned int queue)
1965 {
1966 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1967 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1968 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1969 
1970 	if (unlikely(queue >= mvm->trans->num_rx_queues))
1971 		return;
1972 
1973 	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
1974 		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
1975 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1976 					 RX_QUEUES_NOTIFICATION)))
1977 		iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
1978 	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1979 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1980 }
1981 
1982 static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
1983 	IWL_MVM_COMMON_OPS,
1984 	.rx = iwl_mvm_rx_mq,
1985 	.rx_rss = iwl_mvm_rx_mq_rss,
1986 };
1987