xref: /linux/drivers/net/wireless/intel/iwlwifi/mvm/fw.c (revision d3d344a1ca69d8fb2413e29e6400f3ad58a05c06)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/dmi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "fw/acpi.h"
#include "fw/pnvm.h"
#include "fw/uefi.h"

#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#include "time-sync.h"

#define MVM_UCODE_ALIVE_TIMEOUT	(2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT	(2 * HZ)

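/*
 * Bits in the LARI "UHB allow" bitmap that indicate the platform allows
 * VLP/AFC AP operation; iwl_mvm_lari_cfg() below checks them to decide
 * whether the UATS table flow should be enabled.
 */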
#define IWL_UATS_VLP_AP_SUPPORTED BIT(29)
#define IWL_UATS_AFC_AP_SUPPORTED BIT(30)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
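	/* reuse the kernel's boot-time RSS key so hashing stays consistent */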
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
	__le32 *dump_data = mfu_dump_notif->data;
	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
	int i;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));

	for (i = 0; i < n_words; i++)
		IWL_DEBUG_INFO(mvm,
			       "MFUART assert dump, dword %u: 0x%08x\n",
			       le16_to_cpu(mfu_dump_notif->index_num) *
			       n_words + i,
			       le32_to_cpu(dump_data[i]));
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;
	u32 lmac_error_event_table, umac_error_table;
	u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	u32 i;

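	/*
	 * The layout of the alive notification depends on its version:
	 * v6 adds the IMR debug block, v5 adds the SKU ID, and older
	 * layouts are told apart by their payload size alone.
	 */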
	if (version == 6) {
		struct iwl_alive_ntf_v6 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		mvm->trans->dbg.imr_data.imr_enable =
			le32_to_cpu(palive->imr.enabled);
		mvm->trans->dbg.imr_data.imr_size =
			le32_to_cpu(palive->imr.size);
		mvm->trans->dbg.imr_data.imr2sram_remainbyte =
			mvm->trans->dbg.imr_data.imr_size;
		mvm->trans->dbg.imr_data.imr_base_addr =
			palive->imr.base_addr;
		mvm->trans->dbg.imr_data.imr_curr_addr =
			le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
		IWL_DEBUG_FW(mvm, "IMR Enabled: 0x%x  size 0x%x Address 0x%016llx\n",
			     mvm->trans->dbg.imr_data.imr_enable,
			     mvm->trans->dbg.imr_data.imr_size,
			     le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));

		if (!mvm->trans->dbg.imr_data.imr_enable) {
			for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
				struct iwl_ucode_tlv *reg_tlv;
				struct iwl_fw_ini_region_tlv *reg;

				reg_tlv = mvm->trans->dbg.active_regions[i];
				if (!reg_tlv)
					continue;

				reg = (void *)reg_tlv->data;
				/*
				 * We have only one DRAM IMR region, so we
				 * can break as soon as we find the first
				 * one.
				 */
				if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
					mvm->trans->dbg.unsupported_region_msk |= BIT(i);
					break;
				}
			}
		}
	}

	if (version >= 5) {
		struct iwl_alive_ntf_v5 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);

		mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
		mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
		mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

		IWL_DEBUG_FW(mvm, "Got sku_id: 0x%x 0x%x 0x%x\n",
			     mvm->trans->sku_id[0],
			     mvm->trans->sku_id[1],
			     mvm->trans->sku_id[2]);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
		struct iwl_alive_ntf_v4 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else if (iwl_rx_packet_payload_len(pkt) ==
		   sizeof(struct iwl_alive_ntf_v3)) {
		struct iwl_alive_ntf_v3 *palive3;

		if (pkt_len < sizeof(*palive3))
			return false;

		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	} else {
		WARN(1, "unsupported alive notification (size %d)\n",
		     iwl_rx_packet_payload_len(pkt));
		/* we'll just time out waiting for it */
		return false;
	}

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);

	if (lmac2)
		mvm->trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

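	/*
	 * Mask the cache-control bits out of the UMAC error table address
	 * before using it (judging by the FW_ADDR_CACHE_CONTROL mask name).
	 */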
	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
			   ~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table) {
		if (umac_error_table >=
		    mvm->trans->cfg->min_umac_error_event_table) {
			iwl_fw_umac_set_alive_err_table(mvm->trans,
							umac_error_table);
		} else {
			IWL_ERR(mvm,
				"Not valid error log pointer 0x%08X for %s uCode\n",
				umac_error_table,
				(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
				"Init" : "RT");
		}
	}

	alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);

	return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}

static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
{
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	struct iwl_trans *trans = mvm->trans;
	enum iwl_device_family device_family = trans->trans_cfg->device_family;

	if (device_family < IWL_DEVICE_FAMILY_8000)
		return;

	if (device_family <= IWL_DEVICE_FAMILY_9000)
		IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
	else
		IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
}

313 
314 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
315 					 enum iwl_ucode_type ucode_type)
316 {
317 	struct iwl_notification_wait alive_wait;
318 	struct iwl_mvm_alive_data alive_data = {};
319 	const struct fw_img *fw;
320 	int ret;
321 	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
322 	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
323 	bool run_in_rfkill =
324 		ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
325 	u8 count;
326 	struct iwl_pc_data *pc_data;
327 
328 	if (ucode_type == IWL_UCODE_REGULAR &&
329 	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
330 	    !(fw_has_capa(&mvm->fw->ucode_capa,
331 			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
332 		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
333 	else
334 		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
335 	if (WARN_ON(!fw))
336 		return -EINVAL;
337 	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
338 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
339 
340 	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
341 				   alive_cmd, ARRAY_SIZE(alive_cmd),
342 				   iwl_alive_fn, &alive_data);
343 
344 	/*
345 	 * We want to load the INIT firmware even in RFKILL
346 	 * For the unified firmware case, the ucode_type is not
347 	 * INIT, but we still need to run it.
348 	 */
349 	ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
350 	if (ret) {
351 		iwl_fw_set_current_image(&mvm->fwrt, old_type);
352 		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
353 		return ret;
354 	}
355 
356 	/*
357 	 * Some things may run in the background now, but we
358 	 * just wait for the ALIVE notification here.
359 	 */
360 	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
361 				    MVM_UCODE_ALIVE_TIMEOUT);
362 
363 	if (mvm->trans->trans_cfg->device_family ==
364 	    IWL_DEVICE_FAMILY_AX210) {
365 		/* print these registers regardless of alive fail/success */
366 		IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
367 			 iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
368 		IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
369 			 iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
370 		IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
371 			 iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
372 		IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
373 			 iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
374 	}
375 
376 	if (ret) {
377 		struct iwl_trans *trans = mvm->trans;
378 
379 		/* SecBoot info */
380 		if (trans->trans_cfg->device_family >=
381 					IWL_DEVICE_FAMILY_22000) {
382 			IWL_ERR(mvm,
383 				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
384 				iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
385 				iwl_read_umac_prph(trans,
386 						   UMAG_SB_CPU_2_STATUS));
387 		} else if (trans->trans_cfg->device_family >=
388 			   IWL_DEVICE_FAMILY_8000) {
389 			IWL_ERR(mvm,
390 				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
391 				iwl_read_prph(trans, SB_CPU_1_STATUS),
392 				iwl_read_prph(trans, SB_CPU_2_STATUS));
393 		}
394 
395 		iwl_mvm_print_pd_notification(mvm);
396 
397 		/* LMAC/UMAC PC info */
398 		if (trans->trans_cfg->device_family >=
399 					IWL_DEVICE_FAMILY_22000) {
400 			pc_data = trans->dbg.pc_data;
401 			for (count = 0; count < trans->dbg.num_pc;
402 			     count++, pc_data++)
403 				IWL_ERR(mvm, "%s: 0x%x\n",
404 					pc_data->pc_name,
405 					pc_data->pc_address);
406 		} else if (trans->trans_cfg->device_family >=
407 					IWL_DEVICE_FAMILY_9000) {
408 			IWL_ERR(mvm, "UMAC PC: 0x%x\n",
409 				iwl_read_umac_prph(trans,
410 						   UREG_UMAC_CURRENT_PC));
411 			IWL_ERR(mvm, "LMAC PC: 0x%x\n",
412 				iwl_read_umac_prph(trans,
413 						   UREG_LMAC1_CURRENT_PC));
414 			if (iwl_mvm_is_cdb_supported(mvm))
415 				IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
416 					iwl_read_umac_prph(trans,
417 						UREG_LMAC2_CURRENT_PC));
418 		}
419 
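		/*
		 * When pldr_sync is set, a product reset is pending and an
		 * alive timeout is expected, so skip debug collection (see
		 * the pldr_sync handling in iwl_run_unified_mvm_ucode()).
		 */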
		if (ret == -ETIMEDOUT && !mvm->pldr_sync)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);

		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	/* if we reached this point, the Alive notification was received */
	iwl_mei_alive_notif(true);

	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
			    &mvm->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif

	/*
	 * All the BSSes in the BSS table include the GP2 of the system
	 * at beacon Rx time; this is of course no longer relevant since
	 * we are resetting the firmware. Purge the whole BSS table.
	 */
	cfg80211_bss_flush(mvm->hw->wiphy);

	return 0;
}

static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
				    struct iwl_phy_specific_cfg *phy_filters)
{
#ifdef CONFIG_ACPI
	*phy_filters = mvm->phy_filters;
#endif /* CONFIG_ACPI */
}

#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
	u8 cmd_ver;
	int ret;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      UATS_TABLE_CMD),
		.flags = 0,
		.data[0] = &mvm->fwrt.uats_table,
		.len[0] = sizeof(mvm->fwrt.uats_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	if (!(mvm->trans->trans_cfg->device_family >=
	      IWL_DEVICE_FAMILY_AX210)) {
		IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
		return;
	}

	if (!mvm->fwrt.uats_enabled) {
		IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n");
		return;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					IWL_FW_CMD_VER_UNKNOWN);
	if (cmd_ver != 1) {
		IWL_DEBUG_RADIO(mvm,
				"UATS_TABLE_CMD ver %d not supported\n",
				cmd_ver);
		return;
	}

	ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
	if (ret < 0) {
		IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret);
		return;
	}

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret);
	else
		IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n");
}

static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
	u8 cmd_ver;
	int ret;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      SAR_OFFSET_MAPPING_TABLE_CMD),
		.flags = 0,
		.data[0] = &mvm->fwrt.sgom_table,
		.len[0] = sizeof(mvm->fwrt.sgom_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	if (!mvm->fwrt.sgom_enabled) {
		IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
		return 0;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					IWL_FW_CMD_VER_UNKNOWN);

	if (cmd_ver != 2) {
		IWL_DEBUG_RADIO(mvm, "command version %d is unsupported\n",
				cmd_ver);
		return 0;
	}

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);

	return ret;
}
#else

static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
	return 0;
}

static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
}
#endif

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	u32 cmd_id = PHY_CONFIGURATION_CMD;
	struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
	u8 cmd_ver;
	size_t cmd_size;

	if (iwl_mvm_has_unified_ucode(mvm) &&
	    !mvm->trans->cfg->tx_with_siso_diversity)
		return 0;

	if (mvm->trans->cfg->tx_with_siso_diversity) {
		/*
		 * TODO: currently we don't set the antenna but let the NIC
		 * decide which antenna to use. This should come from BIOS.
		 */
		phy_cfg_cmd.phy_cfg =
			cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
	}

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));

	/* set extra PHY configuration flags from the device's cfg */
	phy_cfg_cmd.phy_cfg |=
		cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);

	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					IWL_FW_CMD_VER_UNKNOWN);
	if (cmd_ver >= 3)
		iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg);

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);
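	/* only v3 of the command carries the PHY filter config; older
	 * firmware gets the shorter v1 layout
	 */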
	cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
				    sizeof(struct iwl_phy_cfg_cmd_v1);
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}

static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	u32 sb_cfg;
	int ret;

	if (mvm->trans->cfg->tx_with_siso_diversity)
		init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
		sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
		/* if needed, we'll reset this on our way out later */
		mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM;
		if (mvm->pldr_sync && iwl_mei_pldr_req())
			return -EBUSY;
	}

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);

		/* if we needed a reset then fail here, but notify and remove first */
		if (mvm->pldr_sync) {
			iwl_mei_alive_notif(false);
			iwl_trans_pcie_remove(mvm->trans, true);
		}

		goto error;
	}
	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD),
				   CMD_SEND_IN_RFKILL,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
					    mvm->nvm_sections);
		if (ret)
			goto error;
		ret = iwl_mvm_load_nvm_to_nic(mvm);
		if (ret)
			goto error;
	}

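	/*
	 * With IWL_MVM_PARSE_NVM set, the driver parses the NVM itself at
	 * this point; otherwise it fetches the parsed NVM from the
	 * firmware after init completes (see iwl_get_nvm() below).
	 */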
	if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE),
				   CMD_SEND_IN_RFKILL,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run PHY configuration: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
					    mvm->set_tx_ant, mvm->set_rx_ant);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	mvm->rfkill_safe_init_done = true;

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm);

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto remove_notif;
	}

	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
		ret = iwl_mvm_send_bt_init_conf(mvm);
		if (ret)
			goto remove_notif;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!mvm->nvm_data) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto remove_notif;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name) {
		ret = iwl_mvm_load_nvm_to_nic(mvm);
		if (ret)
			goto remove_notif;
	}

	WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
		  "Too old NVM version (0x%x, required = 0x%x)",
		  mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will
	 * complete the init sequence later, when RF Kill switches off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		goto remove_notif;
	}

	mvm->rfkill_safe_init_done = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto remove_notif;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto remove_notif;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);
	if (!ret)
		goto out;

	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 0;
	} else {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
	}

	goto out;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->rfkill_safe_init_done = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake one */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)(mvm->nvm_data->channels + 1);
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

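/*
 * LTR (PCIe Latency Tolerance Reporting) lets the platform power-manage
 * the link; only send the configuration if the transport reported LTR
 * as enabled.
 */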
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	struct iwl_dev_tx_power_cmd cmd = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	__le16 *per_chain;
	int ret;
	u16 len = 0;
	u32 n_subbands;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);
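
	/* pick the per-chain table layout matching the firmware's
	 * advertised command version
	 */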
	if (cmd_ver == 7) {
		len = sizeof(cmd.v7);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = cmd.v7.per_chain[0][0];
		cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
	} else if (cmd_ver == 6) {
		len = sizeof(cmd.v6);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = cmd.v6.per_chain[0][0];
	} else if (fw_has_api(&mvm->fw->ucode_capa,
			      IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
		len = sizeof(cmd.v5);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v5.per_chain[0][0];
	} else if (fw_has_capa(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
		len = sizeof(cmd.v4);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v4.per_chain[0][0];
	} else {
		len = sizeof(cmd.v3);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v3.per_chain[0][0];
	}

	/* all structs have the same common part, add it */
	len += sizeof(cmd.common);

	ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
				     IWL_NUM_CHAIN_TABLES,
				     n_subbands, prof_a, prof_b);

	/* return on error or if the profile is disabled (positive number) */
	if (ret)
		return ret;

	iwl_mei_set_power_limit(per_chain);

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
	struct iwl_geo_tx_power_profiles_resp *resp;
	u16 len;
	int ret;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
		.flags = CMD_WANT_SKB,
		.data = { &geo_tx_cmd },
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					   IWL_FW_CMD_VER_UNKNOWN);

	/* the ops field is at the same spot for all versions, so set it in v1 */
	geo_tx_cmd.v1.ops =
		cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);

	if (cmd_ver == 5)
		len = sizeof(geo_tx_cmd.v5);
	else if (cmd_ver == 4)
		len = sizeof(geo_tx_cmd.v4);
	else if (cmd_ver == 3)
		len = sizeof(geo_tx_cmd.v3);
	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
		len = sizeof(geo_tx_cmd.v2);
	else
		len = sizeof(geo_tx_cmd.v1);

	if (!iwl_sar_geo_support(&mvm->fwrt))
		return -EOPNOTSUPP;

	cmd.len[0] = len;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	resp = (void *)cmd.resp_pkt->data;
	ret = le32_to_cpu(resp->profile_idx);

	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
		ret = -EIO;

	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
	union iwl_geo_tx_power_profiles_cmd cmd;
	u16 len;
	u32 n_bands;
	u32 n_profiles;
	u32 sk = 0;
	int ret;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);

	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));

	/* the ops field is at the same spot for all versions, so set it in v1 */
	cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);

	if (cmd_ver == 5) {
		len = sizeof(cmd.v5);
		n_bands = ARRAY_SIZE(cmd.v5.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
	} else if (cmd_ver == 4) {
		len = sizeof(cmd.v4);
		n_bands = ARRAY_SIZE(cmd.v4.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
	} else if (cmd_ver == 3) {
		len = sizeof(cmd.v3);
		n_bands = ARRAY_SIZE(cmd.v3.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES;
	} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			      IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
		len = sizeof(cmd.v2);
		n_bands = ARRAY_SIZE(cmd.v2.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES;
	} else {
		len = sizeof(cmd.v1);
		n_bands = ARRAY_SIZE(cmd.v1.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES;
	}

	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
	/* the table is at the same position for all versions, so use v1 */
	ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
			       n_bands, n_profiles);

	/*
	 * Not supporting SAR or missing the WGDS table is a valid
	 * scenario; in that case there is no need to send the command.
	 */
	if (ret)
		return 0;

	/* Only set to South Korea if the table revision is 1 */
	if (mvm->fwrt.geo_rev == 1)
		sk = 1;

	/*
	 * Set the table_revision to South Korea (1) or not (0). The
	 * element name is misleading, as it doesn't contain the table
	 * revision number, but whether the South Korea variation
	 * should be used.
	 * This must be done after calling iwl_sar_geo_init().
	 */
	if (cmd_ver == 5)
		cmd.v5.table_revision = cpu_to_le32(sk);
	else if (cmd_ver == 4)
		cmd.v4.table_revision = cpu_to_le32(sk);
	else if (cmd_ver == 3)
		cmd.v3.table_revision = cpu_to_le32(sk);
	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
		cmd.v2.table_revision = cpu_to_le32(sk);

	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}

int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	union iwl_ppag_table_cmd cmd;
	int ret, cmd_size;

	ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
	/* Not supporting PPAG table is a valid scenario */
	if (ret < 0)
		return 0;

	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD),
				   0, cmd_size, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
			ret);

	return ret;
}

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	/* no need to read the table, done in INIT stage */
	if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
		return 0;

	return iwl_mvm_ppag_send_cmd(mvm);
}

static const struct dmi_system_id dmi_tas_approved_list[] = {
	{ .ident = "HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		},
	},
	{ .ident = "SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "LENOVO",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "MSFT",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
		},
	},
	{ .ident = "Acer",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
		},
	},
	{ .ident = "ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		},
	},
	{ .ident = "GOOGLE-HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
		},
	},
	{ .ident = "MSI",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
		},
	},
	{ .ident = "Honor",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
		},
	},
	/* keep last */
	{}
};

bool iwl_mvm_is_vendor_in_approved_list(void)
{
	return dmi_check_system(dmi_tas_approved_list);
}

static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
	int i;
	u32 size = le32_to_cpu(*le_size);

	/* Verify that there is room for another country */
	if (size >= IWL_TAS_BLOCK_LIST_MAX)
		return false;

	for (i = 0; i < size; i++) {
		if (list[i] == cpu_to_le32(mcc))
			return true;
	}

	list[size++] = cpu_to_le32(mcc);
	*le_size = cpu_to_le32(size);
	return true;
}

static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
	u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
	int ret;
	union iwl_tas_config_cmd cmd = {};
	int cmd_size, fw_ver;

	BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
		     APCI_WTAS_BLACK_LIST_MAX);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
		IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
		return;
	}

	fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
				       IWL_FW_CMD_VER_UNKNOWN);

	ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"TAS table invalid or unavailable. (%d)\n",
				ret);
		return;
	}

	if (ret == 0)
		return;

	if (!iwl_mvm_is_vendor_in_approved_list()) {
		IWL_DEBUG_RADIO(mvm,
				"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
				dmi_get_system_info(DMI_SYS_VENDOR));
		if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
						    &cmd.v4.block_list_size,
						    IWL_MCC_US)) ||
		    (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
						    &cmd.v4.block_list_size,
						    IWL_MCC_CANADA))) {
			IWL_DEBUG_RADIO(mvm,
					"Unable to add US/Canada to TAS block list, disabling TAS\n");
			return;
		}
	} else {
		IWL_DEBUG_RADIO(mvm,
				"System vendor '%s' is in the approved list.\n",
				dmi_get_system_info(DMI_SYS_VENDOR));
	}

	/* v4 is the same size as v3, so no need to differentiate here */
	cmd_size = fw_ver < 3 ?
		sizeof(struct iwl_tas_config_cmd_v2) :
		sizeof(struct iwl_tas_config_cmd_v3);

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}

static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
	u8 value;
	int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
				      &iwl_rfi_guid, &value);

	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);

	} else if (value >= DSM_VALUE_RFI_MAX) {
		IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, value=%d\n",
				value);

	} else if (value == DSM_VALUE_RFI_ENABLE) {
		IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
		return DSM_VALUE_RFI_ENABLE;
	}

	IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");

	/* default behaviour is disabled */
	return DSM_VALUE_RFI_DISABLE;
}

static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
	int ret;
	u32 value;
	struct iwl_lari_config_change_cmd_v7 cmd = {};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
						   LARI_CONFIG_CHANGE), 1);

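	/*
	 * Gather the LARI configuration: the ACPI config bitmap plus the
	 * optional OEM permission bitmaps from DSM functions; each DSM
	 * lookup only fills its field when the platform provides a value.
	 */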
	cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
				   &iwl_guid, &value);
	if (!ret)
		cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ENABLE_UNII4_CHAN,
				   &iwl_guid, &value);
	if (!ret)
		cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ACTIVATE_CHANNEL,
				   &iwl_guid, &value);
	if (!ret) {
		if (cmd_ver < 8)
			value &= ~ACTIVATE_5G2_IN_WW_MASK;
		cmd.chan_state_active_bitmap = cpu_to_le32(value);
	}

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ENABLE_6E,
				   &iwl_guid, &value);
	if (!ret)
		cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_FORCE_DISABLE_CHANNELS,
				   &iwl_guid, &value);
	if (!ret)
		cmd.force_disable_channels_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
				   &iwl_guid, &value);
	if (!ret)
		cmd.edt_bitmap = cpu_to_le32(value);

	if (cmd.config_bitmap ||
	    cmd.oem_uhb_allow_bitmap ||
	    cmd.oem_11ax_allow_bitmap ||
	    cmd.oem_unii4_allow_bitmap ||
	    cmd.chan_state_active_bitmap ||
	    cmd.force_disable_channels_bitmap ||
	    cmd.edt_bitmap) {
		size_t cmd_size;

		switch (cmd_ver) {
		case 8:
		case 7:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
			break;
		case 6:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
			break;
		case 5:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
			break;
		case 4:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
			break;
		case 3:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
			break;
		case 2:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
			break;
		default:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
			break;
		}

		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd.config_bitmap),
				le32_to_cpu(cmd.oem_11ax_allow_bitmap));
		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
				le32_to_cpu(cmd.oem_unii4_allow_bitmap),
				le32_to_cpu(cmd.chan_state_active_bitmap),
				cmd_ver);
		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
				le32_to_cpu(cmd.oem_uhb_allow_bitmap),
				le32_to_cpu(cmd.force_disable_channels_bitmap));
		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n",
				le32_to_cpu(cmd.edt_bitmap));
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
						   LARI_CONFIG_CHANGE),
					   0, cmd_size, &cmd);
		if (ret < 0)
			IWL_DEBUG_RADIO(mvm,
					"Failed to send LARI_CONFIG_CHANGE (%d)\n",
					ret);
	}

	if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
	    le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
		mvm->fwrt.uats_enabled = true;
}

void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
{
	int ret;

	/* read PPAG table */
	ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG BIOS table invalid or unavailable. (%d)\n",
				ret);
	}

	/* read SAR tables */
	ret = iwl_sar_get_wrds_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/*
		 * If not available, don't fail and don't bother with EWRD
		 * and WGDS.
		 */
		if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
			/*
			 * If basic SAR is not available, we check for WGDS,
			 * which should *not* be available either.  If it is
			 * available, issue an error, because we can't use SAR
			 * Geo without basic SAR.
			 */
			IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
		}

	} else {
		ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
		/*
		 * If EWRD is not available, we can still use WRDS,
		 * so don't fail.
		 */
		if (ret < 0)
			IWL_DEBUG_RADIO(mvm,
					"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
					ret);

		/* read geo SAR table */
		if (iwl_sar_geo_support(&mvm->fwrt)) {
			ret = iwl_sar_get_wgds_table(&mvm->fwrt);
			/* we don't fail if the table is not available */
			if (ret < 0)
				IWL_DEBUG_RADIO(mvm,
						"Geo SAR BIOS table invalid or unavailable. (%d)\n",
						ret);
		}
	}

	iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
}
#else /* CONFIG_ACPI */

inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
				      int prof_a, int prof_b)
{
	return 1;
}

inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}

int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	return 0;
}

static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
}

static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
}

bool iwl_mvm_is_vendor_in_approved_list(void)
{
	return false;
}

static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
	return DSM_VALUE_RFI_DISABLE;
}

void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
{
}

#endif /* CONFIG_ACPI */

void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
	u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
	int ret;
	u32 resp;

	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
		.buf_size = 0,
	};
	struct iwl_host_cmd host_cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};

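	/*
	 * Only ERROR_RECOVERY_UPDATE_DB attaches a payload (the recovery
	 * blob saved before the reset); other flags send the command
	 * header alone.
	 */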
	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buffer was allocated during HW reset */
		if (!mvm->error_recovery_buf)
			return;

		host_cmd.data[1] = mvm->error_recovery_buf;
		host_cmd.len[1] = error_log_size;
		host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	/* the skb response is only relevant for ERROR_RECOVERY_UPDATE_DB */
	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
		if (resp)
			IWL_ERR(mvm,
				"Failed to send recovery cmd: blob was invalid %d\n",
				resp);
	}
}

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	return iwl_mvm_sar_select_profile(mvm, 1, 1);
}

static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm);

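	/*
	 * Non-unified firmware boots in two steps: run the INIT image
	 * first (NVM read and calibrations), then restart the device
	 * into the regular runtime image.
	 */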
	ret = iwl_run_init_mvm_ucode(mvm);

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

		if (iwlmvm_mod_params.init_dbg)
			return 0;
		return ret;
	}

	iwl_fw_dbg_stop_sync(&mvm->fwrt);
	iwl_trans_stop_device(mvm->trans);
	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = false;
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = true;

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_supported_band *sband = NULL;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		if (ret != -ERFKILL && !mvm->pldr_sync)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	/* FW loaded successfully */
	mvm->pldr_sync = false;

	iwl_fw_disable_dbg_asserts(&mvm->fwrt);
	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
		mvm->fwrt.dump.conf = FW_DBG_INVALID;
		/* if we have a destination, assume EARLY START */
		if (mvm->fw->dbg.dest_tlv)
			mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
		iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;
		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
		ret = iwl_set_soc_latency(&mvm->fwrt);
		if (ret)
			goto error;
	}

	iwl_mvm_lari_cfg(mvm);

	/* Init RSS configuration */
	ret = iwl_configure_rxq(&mvm->fwrt);
	if (ret)
		goto error;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
		RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
	}

	for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++)
		RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);

	memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map));

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	}

	/*
	 * Add the auxiliary station for scanning.
	 * Newer versions of this command imply that the fw uses an
	 * internal aux station for all aux activities that don't
	 * require a dedicated data queue.
	 */
	if (!iwl_mvm_has_new_station_api(mvm->fw)) {
		/*
		 * In old versions the aux station used a mac id like
		 * other stations, not an lmac id.
		 */
		ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
		if (ret)
			goto error;
	}

	/* Add all the PHY contexts */
	i = 0;
	while (!sband && i < NUM_NL80211_BANDS)
		sband = mvm->hw->wiphy->bands[i++];

	if (WARN_ON_ONCE(!sband)) {
		ret = -ENODEV;
		goto error;
	}

	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/*
		 * In order to give the responsibility for ct-kill and
		 * TX backoff to the FW, we need to send an empty
		 * temperature reporting cmd during init time.
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

#ifdef CONFIG_THERMAL
	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#endif

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
		WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);

		if (mvm->time_sync.active)
			iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr,
						 IWL_TIME_SYNC_PROTOCOL_TM |
						 IWL_TIME_SYNC_PROTOCOL_FTM);
	}

	if (!mvm->ptp_data.ptp_clock)
		iwl_mvm_ptp_init(mvm);

	if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
		IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");

	ret = iwl_mvm_ppag_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_init(mvm);
	if (ret == 0)
		ret = iwl_mvm_sar_geo_init(mvm);
	if (ret < 0)
		goto error;

	ret = iwl_mvm_sgom_init(mvm);
	if (ret)
		goto error;

	iwl_mvm_tas_init(mvm);
	iwl_mvm_leds_sync(mvm);
	iwl_mvm_uats_init(mvm);

	if (iwl_rfi_supported(mvm)) {
		if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
			iwl_rfi_send_config_cmd(mvm, NULL);
	}

	iwl_mvm_mei_device_state(mvm, true);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
		RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
	}

	if (!iwl_mvm_has_new_station_api(mvm->fw)) {
		/*
		 * Add the auxiliary station for scanning.
		 * Newer versions of this command imply that the fw uses an
		 * internal aux station for all aux activities that don't
		 * require a dedicated data queue.
		 * In old versions the aux station used a mac id like other
		 * stations, not an lmac id.
		 */
		ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
		if (ret)
			goto error;
	}

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}
1893