// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */

#include "mld.h"

#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
#include "iwl-nvm-parse.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
#include "coex.h"
#include "regulatory.h"
#include "thermal.h"

/* Tell the firmware which TX antennas are valid (TX_ANT_CONFIGURATION_CMD).
 * Returns 0 on success or the error from sending the host command.
 */
static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
{
	struct iwl_tx_ant_cfg_cmd cmd;

	lockdep_assert_wiphy(mld->wiphy);

	cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));

	/* NOTE(review): cmd.valid is __le32 but is printed with %u here,
	 * so the debug output is byte-swapped on big-endian hosts.
	 */
	IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);

	return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
}

/* Configure RSS (receive side scaling): enable hashing on TCP/UDP/payload
 * for both IPv4 and IPv6, fill the indirection table to spread traffic
 * over the RX queues, and set a random secret key.
 */
static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
{
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mld->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
}

/* Send the scan configuration command with the valid TX/RX antenna masks. */
static int iwl_mld_config_scan(struct iwl_mld *mld)
{
	struct iwl_scan_config cmd = {
		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
				    &cmd);
}

/* Copy the IMR (debug memory region) info from the alive notification into
 * the transport's debug data. If the firmware reports IMR as disabled, mark
 * the (single) DRAM IMR dump region as unsupported so it is not collected.
 */
static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
				   const struct iwl_imr_alive_info *imr_info)
{
	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;

	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
	imr_data->imr_size = le32_to_cpu(imr_info->size);
	imr_data->imr2sram_remainbyte = imr_data->imr_size;
	/* base addr is kept in firmware (LE) byte order; curr_addr holds the
	 * CPU-order copy used for iteration.
	 */
	imr_data->imr_base_addr = imr_info->base_addr;
	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);

	if (imr_data->imr_enable)
		return;

	/* IMR disabled: find the DRAM IMR dump region (if configured) and
	 * flag it unsupported.
	 */
	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_fw_ini_region_tlv *reg;

		if (!trans->dbg.active_regions[i])
			continue;

		reg = (void *)trans->dbg.active_regions[i]->data;

		/* We have only one DRAM IMR region, so we
		 * can break as soon as we find the first
		 * one.
		 */
		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
			trans->dbg.unsupported_region_msk |= BIT(i);
			break;
		}
	}
}

/* Notification-wait handler for UCODE_ALIVE_NTFY.
 *
 * Validates the notification version (6 or 7 only) and length, then records
 * SKU id, LMAC/UMAC error-table pointers, and firmware version info.
 * *data points to a bool that is set to whether the alive status is OK.
 *
 * Returns true when the notification was consumed (matched version/size),
 * false to keep waiting.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_alive_ntf_v6 *palive;
	bool *alive_valid = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	/* NOTE(review): v7 is accepted but validated against the v6 struct
	 * size — presumably v7 only adds meaning to existing fields (the
	 * flags read below); confirm against the alive API definition.
	 */
	if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	/* NOTE(review): lmac2 is assigned unconditionally, so the NULL init
	 * above and the "if (lmac2)" checks below are effectively dead —
	 * looks like CDB detection was intended; verify.
	 */
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
	trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
	trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	/* mask off the cache-control bits to get the real address */
	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table >= trans->cfg->min_umac_error_event_table)
		iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
	else
		IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
			umac_error_table);

	*alive_valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	/* flags field only exists from notification version 7 on */
	if (version >= 7)
		IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
			     le16_to_cpu(palive->flags));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}

#define MLD_ALIVE_TIMEOUT		(2 * HZ)
#define MLD_INIT_COMPLETE_TIMEOUT	(2 * HZ)

/* Dump diagnostic registers (secure-boot status, power-management and OTP
 * registers, program counters) to the log after the alive notification
 * timed out, to help debug why the firmware did not come up.
 */
static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}

/* Start the regular firmware image and wait for the alive notification.
 *
 * On any failure after the firmware was started, the device is stopped
 * before returning. Returns 0 on success, -ETIMEDOUT / -EIO / transport
 * error otherwise.
 */
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
{
	const struct fw_img *fw =
		iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	bool alive_valid = false;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* register the wait before starting the fw so the notification
	 * cannot be missed
	 */
	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_valid);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, fw, true);
	if (ret) {
		/* fw never started - just drop the pending wait */
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);

	if (ret) {
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		iwl_mld_print_alive_notif_timeout(mld);
		goto alive_failure;
	}

	if (!alive_valid) {
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		ret = -EIO;
		goto alive_failure;
	}

	iwl_trans_fw_alive(mld->trans, 0);

	return 0;

alive_failure:
	iwl_trans_stop_device(mld->trans);
	return ret;
}

/* Run the firmware init sequence: load fw and wait for alive, load PNVM,
 * send the extended init config and wait for INIT_COMPLETE_NOTIF, and read
 * the NVM if not already cached.
 *
 * On failure after alive, the device is stopped before returning.
 */
int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld);
	if (ret)
		return ret;

	/* latch whether the platform forces URM (read from the PMU step
	 * flow register)
	 */
	mld->trans->step_urm =
		!!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
		   CNVI_PMU_STEP_FLOW_FORCE_URM);

	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    &mld->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		goto init_failure;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		goto init_failure;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		goto init_failure;
	}

	/* NVM data survives restarts - only read it once */
	if (!mld->nvm_data) {
		mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
		if (IS_ERR(mld->nvm_data)) {
			ret = PTR_ERR(mld->nvm_data);
			mld->nvm_data = NULL;
			IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
			goto init_failure;
		}
	}

	return 0;

init_failure:
	iwl_trans_stop_device(mld->trans);
	return ret;
}

/* Bring the hardware up, run the fw init sequence and init MCC; marks the
 * firmware as running on success.
 */
int iwl_mld_load_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_trans_start_hw(mld->trans);
	if (ret)
		return ret;

	ret = iwl_mld_run_fw_init_sequence(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		return ret;

	mld->fw_status.running = true;

	return 0;
}

/* Stop the firmware: abort pending notification waits, sync debug data
 * collection, stop the device and clear the running flag.
 */
void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	iwl_abort_notification_waits(&mld->notif_wait);

	iwl_fw_dbg_stop_sync(&mld->fwrt);

	iwl_trans_stop_device(mld->trans);

	mld->fw_status.running = false;
}

/* Interface iterator: force a disconnect on station interfaces after an
 * in-hw-restart recovery failed.
 */
static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_hw_restart_disconnect(vif);
}

/* Send the error recovery command to the firmware.
 *
 * @flags: recovery flags; when ERROR_RECOVERY_UPDATE_DB is set, the
 *	previously saved error-recovery blob is attached and the firmware's
 *	response status is checked - a non-zero status means the blob was
 *	rejected, in which case all station interfaces are disconnected.
 *
 * The recovery buffer is freed in all cases (it is single-use).
 */
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] = error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}

/* Send all post-alive configuration commands to the firmware (antennas,
 * coex, latency, LARI, thermal, RX queues, RSS, scan, power, recovery on
 * restart, LED, PPAG/SAR/SGOM/TAS/UATS regulatory tables).
 *
 * Returns 0 on success, or the first fatal command error.
 */
static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	/* after an error-triggered restart, restore the error DB and the
	 * time-sync configuration
	 */
	if (mld->fw_status.in_hw_restart) {
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}

/* Top-level firmware start: load + configure; on any failure the firmware
 * is stopped again before returning the error.
 */
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}