xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/fw.c (revision 2c7e4a2663a1ab5a740c59c31991579b6b865a26)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2024-2025 Intel Corporation
4  */
5 
6 #include "mld.h"
7 
8 #include "fw/api/alive.h"
9 #include "fw/api/scan.h"
10 #include "fw/api/rx.h"
11 #include "phy.h"
12 #include "fw/dbg.h"
13 #include "fw/pnvm.h"
14 #include "hcmd.h"
15 #include "power.h"
16 #include "mcc.h"
17 #include "led.h"
18 #include "coex.h"
19 #include "regulatory.h"
20 #include "thermal.h"
21 
iwl_mld_send_tx_ant_cfg(struct iwl_mld * mld)22 static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
23 {
24 	struct iwl_tx_ant_cfg_cmd cmd;
25 
26 	lockdep_assert_wiphy(mld->wiphy);
27 
28 	cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));
29 
30 	IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);
31 
32 	return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
33 }
34 
iwl_mld_send_rss_cfg_cmd(struct iwl_mld * mld)35 static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
36 {
37 	struct iwl_rss_config_cmd cmd = {
38 		.flags = cpu_to_le32(IWL_RSS_ENABLE),
39 		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
40 			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
41 			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
42 			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
43 			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
44 			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
45 	};
46 
47 	lockdep_assert_wiphy(mld->wiphy);
48 
49 	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
50 	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
51 		cmd.indirection_table[i] =
52 			1 + (i % (mld->trans->info.num_rxqs - 1));
53 	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
54 
55 	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
56 }
57 
iwl_mld_config_scan(struct iwl_mld * mld)58 static int iwl_mld_config_scan(struct iwl_mld *mld)
59 {
60 	struct iwl_scan_config cmd = {
61 		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
62 		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
63 	};
64 
65 	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
66 				    &cmd);
67 }
68 
iwl_mld_alive_imr_data(struct iwl_trans * trans,const struct iwl_imr_alive_info * imr_info)69 static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
70 				   const struct iwl_imr_alive_info *imr_info)
71 {
72 	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;
73 
74 	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
75 	imr_data->imr_size = le32_to_cpu(imr_info->size);
76 	imr_data->imr2sram_remainbyte = imr_data->imr_size;
77 	imr_data->imr_base_addr = imr_info->base_addr;
78 	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);
79 
80 	if (imr_data->imr_enable)
81 		return;
82 
83 	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
84 		struct iwl_fw_ini_region_tlv *reg;
85 
86 		if (!trans->dbg.active_regions[i])
87 			continue;
88 
89 		reg = (void *)trans->dbg.active_regions[i]->data;
90 
91 		/* We have only one DRAM IMR region, so we
92 		 * can break as soon as we find the first
93 		 * one.
94 		 */
95 		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
96 			trans->dbg.unsupported_region_msk |= BIT(i);
97 			break;
98 		}
99 	}
100 }
101 
/* Data returned from the alive-notification wait handler to its caller */
struct iwl_mld_alive_data {
	/* SKU ID words copied verbatim from the alive notification */
	__le32 sku_id[3];
	/* true when the firmware reported IWL_ALIVE_STATUS_OK */
	bool valid;
};
106 
/* Notification-wait handler for UCODE_ALIVE_NTFY.
 *
 * Validates the packet size against the notification API version, caches
 * the IMR info and error-table pointers in the transport, copies the SKU
 * ID into @data (a struct iwl_mld_alive_data), and records whether the
 * firmware reported a healthy status.
 *
 * Returns true once a well-formed notification has been consumed (which
 * stops the wait), false to keep waiting.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	unsigned int expected_sz;
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_mld_alive_data *alive_data = data;
	struct iwl_alive_ntf *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	/* The notification layout depends on the API version; reject
	 * versions this driver does not understand.
	 */
	switch (version) {
	case 6:
	case 7:
		expected_sz = sizeof(struct iwl_alive_ntf_v6);
		break;
	case 8:
		expected_sz = sizeof(struct iwl_alive_ntf);
		break;
	default:
		return false;
	}

	/* Keep waiting if the payload does not match the expected size */
	if (pkt_len != expected_sz)
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	BUILD_BUG_ON(sizeof(alive_data->sku_id) !=
		     sizeof(palive->sku_id.data));
	/* SKU ID is kept in wire (little-endian) order for later use */
	memcpy(alive_data->sku_id, palive->sku_id.data,
	       sizeof(palive->sku_id.data));

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     le32_to_cpu(alive_data->sku_id[0]),
		     le32_to_cpu(alive_data->sku_id[1]),
		     le32_to_cpu(alive_data->sku_id[2]));

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	/* NOTE(review): lmac2 is assigned unconditionally above, so this
	 * check is always true here; kept as-is.
	 */
	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	/* Mask off the cache-control bits to get the real address */
	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table >= trans->mac_cfg->base->min_umac_error_event_table)
		iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
	else
		IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
			umac_error_table);

	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	/* Fields below only exist from the given notification version on */
	if (version >= 7)
		IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
			     le16_to_cpu(palive->flags));

	if (version >= 8)
		IWL_DEBUG_FW(mld, "platform_id 0x%llx\n",
			     le64_to_cpu(palive->platform_id));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}
203 
204 #define MLD_ALIVE_TIMEOUT		(2 * HZ)
205 #define MLD_INIT_COMPLETE_TIMEOUT	(2 * HZ)
206 
/* Dump secure-boot status, selected power/OTP registers and the last
 * known firmware program counters, to aid debugging when the alive
 * notification never arrives.
 */
static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
/* Local helper: print one uMAC periphery register by name and value */
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	/* Dump the firmware program counters captured by the transport */
	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}
235 
/* Start the regular firmware image and wait for the alive notification.
 *
 * On success @alive_data is filled by iwl_alive_fn(). Returns 0 on
 * success, or a negative error code if the firmware failed to start,
 * the alive notification timed out, or the firmware reported a bad
 * status.
 */
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld,
				      struct iwl_mld_alive_data *alive_data)
{
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* Register the wait before starting the FW so the notification
	 * cannot be missed.
	 */
	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, alive_data);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, mld->fw, IWL_UCODE_REGULAR, true);
	if (ret) {
		/* FW never started, so the wait must be removed manually */
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);

	if (ret) {
		/* Collect debug data only on a real timeout; print the
		 * boot-status registers for any failure.
		 */
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		iwl_mld_print_alive_notif_timeout(mld);
		return ret;
	}

	if (!alive_data->valid) {
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		return -EIO;
	}

	iwl_trans_fw_alive(mld->trans);

	return 0;
}
277 
/* Run the full firmware init sequence: load the FW and wait for alive,
 * load the PNVM, then send the extended init config and PHY config
 * commands and wait for INIT_COMPLETE_NOTIF.
 *
 * Returns 0 on success or a negative error code; on failure the caller
 * is expected to stop the firmware.
 */
static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_PHY)),
	};
	struct iwl_mld_alive_data alive_data = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld, &alive_data);
	if (ret)
		return ret;

	/* PNVM load needs the SKU ID reported in the alive notification */
	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    &mld->fw->ucode_capa, alive_data.sku_id);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		return ret;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	/* Register the wait before sending the commands that trigger the
	 * INIT_COMPLETE notification.
	 */
	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		/* The notification will never come; drop the wait */
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	ret = iwl_mld_send_phy_cfg_cmd(mld);
	if (ret) {
		IWL_ERR(mld, "Failed to send PHY config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	/* iwl_wait_notification consumes the wait on both success and
	 * failure, so no removal is needed past this point.
	 */
	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		return ret;
	}

	return 0;
}
337 
iwl_mld_load_fw(struct iwl_mld * mld)338 int iwl_mld_load_fw(struct iwl_mld *mld)
339 {
340 	int ret;
341 
342 	lockdep_assert_wiphy(mld->wiphy);
343 
344 	ret = iwl_trans_start_hw(mld->trans);
345 	if (ret)
346 		return ret;
347 
348 	ret = iwl_mld_run_fw_init_sequence(mld);
349 	if (ret)
350 		goto err;
351 
352 	mld->fw_status.running = true;
353 
354 	return 0;
355 err:
356 	iwl_mld_stop_fw(mld);
357 	return ret;
358 }
359 
/* Stop the firmware and the device.
 *
 * Ordering matters: abort any pending notification waits first so no
 * waiter blocks on a notification that will never arrive, stop the
 * debug machinery, then stop the device itself.
 */
void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	iwl_abort_notification_waits(&mld->notif_wait);

	iwl_fw_dbg_stop_sync(&mld->fwrt);

	iwl_trans_stop_device(mld->trans);

	/* HW is stopped, no more coming RX. Cancel all notifications in
	 * case they were sent just before stopping the HW.
	 */
	iwl_mld_cancel_async_notifications(mld);

	mld->fw_status.running = false;
}
377 
/* Interface iterator: request a disconnect-on-restart for station vifs */
static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	/* Only station interfaces need to be disconnected */
	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_hw_restart_disconnect(vif);
}
384 
/* Send the firmware error recovery command with the given @flags.
 *
 * When ERROR_RECOVERY_UPDATE_DB is set, the previously captured error
 * recovery buffer is attached as a second fragment and the firmware's
 * response is checked; a non-zero response means the blob was rejected,
 * in which case all station interfaces are asked to disconnect. The
 * recovery buffer is always freed here, whether or not the command was
 * sent successfully.
 */
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		/* Attach the captured blob as a second, non-copied chunk */
		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] =  error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		/* Zero response means the firmware accepted the blob */
		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		/* Blob rejected: force stations to reconnect from scratch */
		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}
451 
/* Send the post-alive configuration sequence to the firmware: antennas,
 * coex, SoC latency, regulatory (LARI), thermal, RX queues, RSS, scan,
 * device power, recovery (on HW restart), LED, and the various
 * regulatory/power tables (PPAG, SAR, SGOM, TAS, UATS).
 *
 * Returns 0 on success, or the first error encountered; non-fatal
 * configuration steps (TAS/UATS/LARI) do not return errors.
 */
static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	/* Resume cooling at the state the cooling device was left in */
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	/* RX queues must be configured before RSS can spread over them */
	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	if (mld->fw_status.in_hw_restart) {
		/* Replay the saved error-recovery blob and re-sync time
		 * after a firmware restart.
		 */
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}
526 
/* Load, configure and fully start the firmware.
 *
 * On load failure, collects driver-triggered debug data. On any failure
 * after a successful load, stops the firmware before returning.
 * Returns 0 on success or a negative error code.
 */
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		return ret;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}
555