// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */

#include "mld.h"

#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
#include "phy.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
#include "coex.h"
#include "regulatory.h"
#include "thermal.h"

/* Tell the firmware which TX antennas are valid to use. */
static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
{
	struct iwl_tx_ant_cfg_cmd cmd;

	lockdep_assert_wiphy(mld->wiphy);

	cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));

	/* NOTE(review): cmd.valid is __le32 here but printed with %u —
	 * on big-endian this debug print would show the byte-swapped
	 * value; harmless for the command itself.
	 */
	IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);

	return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
}

/*
 * Configure RSS: enable hashing on TCP/UDP/payload for IPv4/IPv6 and
 * spread RX traffic across all queues except queue 0.
 */
static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
{
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mld->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
}

/* Send the basic scan configuration: the valid TX/RX chain masks. */
static int iwl_mld_config_scan(struct iwl_mld *mld)
{
	struct iwl_scan_config cmd = {
		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
				    &cmd);
}

/*
 * Record the IMR (debug memory region) info reported in the alive
 * notification.  If the firmware reports IMR as disabled, mark the (single)
 * DRAM IMR dump region as unsupported so it is skipped during dump.
 */
static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
				   const struct iwl_imr_alive_info *imr_info)
{
	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;

	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
	imr_data->imr_size = le32_to_cpu(imr_info->size);
	/* nothing copied yet, so the whole region remains to be read */
	imr_data->imr2sram_remainbyte = imr_data->imr_size;
	imr_data->imr_base_addr = imr_info->base_addr;
	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);

	if (imr_data->imr_enable)
		return;

	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_fw_ini_region_tlv *reg;

		if (!trans->dbg.active_regions[i])
			continue;

		reg = (void *)trans->dbg.active_regions[i]->data;

		/* We have only one DRAM IMR region, so we
		 * can break as soon as we find the first
		 * one.
		 */
		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
			trans->dbg.unsupported_region_msk |= BIT(i);
			break;
		}
	}
}

/*
 * Notification-wait handler for UCODE_ALIVE_NTFY.
 *
 * Validates the notification size against the firmware's advertised
 * version, then extracts SKU IDs, LMAC/UMAC error table pointers, firmware
 * versions and IMR info.  Sets *data (bool) to whether the firmware
 * reported IWL_ALIVE_STATUS_OK.  Returns false (keep waiting) on an
 * unexpected version or size, true once the notification was consumed.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	unsigned int expected_sz;
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_alive_ntf *palive;
	bool *alive_valid = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	/* only notification versions 6-8 are understood here */
	switch (version) {
	case 6:
	case 7:
		expected_sz = sizeof(struct iwl_alive_ntf_v6);
		break;
	case 8:
		expected_sz = sizeof(struct iwl_alive_ntf);
		break;
	default:
		return false;
	}

	if (pkt_len != expected_sz)
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
	trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
	trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	/* NOTE(review): lmac2 is unconditionally set above, so this check
	 * (and the one below) always takes the true branch; presumably
	 * kept for parity with older drivers where lmac2 could be NULL.
	 */
	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	/* mask off the cache-control bits to get the real address */
	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table >= trans->cfg->min_umac_error_event_table)
		iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
	else
		IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
			umac_error_table);

	*alive_valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	/* flags field exists only from notification version 7 on */
	if (version >= 7)
		IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
			     le16_to_cpu(palive->flags));

	/* platform_id field exists only from notification version 8 on */
	if (version >= 8)
		IWL_DEBUG_FW(mld, "platform_id 0x%llx\n",
			     le64_to_cpu(palive->platform_id));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}

#define MLD_ALIVE_TIMEOUT		(2 * HZ)
#define MLD_INIT_COMPLETE_TIMEOUT	(2 * HZ)

/*
 * Dump secure-boot status, a few WFPM/HPM/OTP registers and the firmware
 * program counters to the log — debug aid when the alive notification
 * never arrives.
 */
static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}

/*
 * Load the regular ucode image and wait for the alive notification.
 *
 * On timeout, collects debug data and dumps bring-up registers before
 * returning the error.  On success the transport is told the firmware is
 * alive.  Returns 0 on success or a negative error code.
 */
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
{
	const struct fw_img *fw =
		iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	bool alive_valid = false;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_valid);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, fw, true);
	if (ret) {
		/* the wait was registered above; undo it on failure */
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);

	if (ret) {
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		iwl_mld_print_alive_notif_timeout(mld);
		return ret;
	}

	if (!alive_valid) {
		/* firmware responded but did not report STATUS_OK */
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		return -EIO;
	}

	iwl_trans_fw_alive(mld->trans, 0);

	return 0;
}

/*
 * Full firmware init sequence: load + alive, PNVM load, then the extended
 * init config and PHY config commands, ending with a wait for
 * INIT_COMPLETE_NOTIF.  Returns 0 on success or a negative error code.
 */
static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_PHY)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld);
	if (ret)
		return ret;

	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    &mld->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		return ret;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	/* register the wait before sending the commands that trigger it */
	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	ret = iwl_mld_send_phy_cfg_cmd(mld);
	if (ret) {
		IWL_ERR(mld, "Failed to send PHY config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Start the hardware and bring the firmware up to the running state
 * (init sequence + MCC init).  On any failure the firmware is stopped
 * again.  Returns 0 on success or a negative error code.
 */
int iwl_mld_load_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_trans_start_hw(mld->trans);
	if (ret)
		return ret;

	ret = iwl_mld_run_fw_init_sequence(mld);
	if (ret)
		goto err;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		goto err;

	mld->fw_status.running = true;

	return 0;
err:
	iwl_mld_stop_fw(mld);
	return ret;
}

/* Stop the firmware and device, and discard anything still pending. */
void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	/* wake up anyone still waiting for a notification */
	iwl_abort_notification_waits(&mld->notif_wait);

	iwl_fw_dbg_stop_sync(&mld->fwrt);

	iwl_trans_stop_device(mld->trans);

	/* HW is stopped, no more coming RX. Cancel all notifications in
	 * case they were sent just before stopping the HW.
	 */
	iwl_mld_cancel_async_notifications(mld);

	mld->fw_status.running = false;
}

/* Interface iterator: force station interfaces to disconnect on restart. */
static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_hw_restart_disconnect(vif);
}

/*
 * Send the error recovery command to the firmware, optionally attaching
 * the saved error-recovery blob (ERROR_RECOVERY_UPDATE_DB).  The recovery
 * buffer is freed in all cases.  If the firmware rejects the blob, all
 * station interfaces are forced to disconnect.
 */
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		/* attach the saved blob as a second, no-copy fragment */
		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] = error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		/* non-zero response means the firmware rejected the blob */
		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}

/*
 * Run-time firmware configuration after it is up: antennas, coex, RSS,
 * scan, power, thermal, regulatory (LARI/PPAG/SAR/SGOM/TAS/UATS), and —
 * on restart — the recovery/time-sync flow.  Returns 0 on success or the
 * first failing step's error code.
 */
static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	if (mld->fw_status.in_hw_restart) {
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}

/*
 * Top-level entry: load the firmware and then configure it.  On config
 * failure the firmware is stopped again.  Returns 0 on success or a
 * negative error code.
 */
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		return ret;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}