// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */

#include "mld.h"

#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
#include "phy.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
#include "coex.h"
#include "regulatory.h"
#include "thermal.h"

/* Send TX_ANT_CONFIGURATION_CMD with the bitmask of valid TX antennas. */
static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
{
	struct iwl_tx_ant_cfg_cmd cmd;

	lockdep_assert_wiphy(mld->wiphy);

	cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));

	/* NOTE(review): cmd.valid is __le32, printed raw with %u — matches
	 * the driver's existing debug-print convention.
	 */
	IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);

	return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
}

/*
 * Send RSS_CONFIG_CMD: enable RSS hashing for IPv4/IPv6 TCP, UDP and
 * plain payload, fill the indirection table so that hashed traffic is
 * spread over RX queues 1..num_rxqs-1, and install a random secret key.
 */
static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
{
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mld->trans->info.num_rxqs - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
}

/* Send SCAN_CFG_CMD with the valid TX/RX chain masks for scanning. */
static int iwl_mld_config_scan(struct iwl_mld *mld)
{
	struct iwl_scan_config cmd = {
		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
				    &cmd);
}

/*
 * Record the IMR (DRAM debug-dump region) parameters reported in the
 * alive notification into the transport's debug data. If the firmware
 * reports IMR as disabled, mark the (single) configured DRAM IMR dump
 * region as unsupported so it is skipped during debug collection.
 */
static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
				   const struct iwl_imr_alive_info *imr_info)
{
	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;

	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
	imr_data->imr_size = le32_to_cpu(imr_info->size);
	/* presumably tracks how many bytes remain to copy from IMR to
	 * SRAM during a dump; initialized to the full size — TODO confirm
	 */
	imr_data->imr2sram_remainbyte = imr_data->imr_size;
	imr_data->imr_base_addr = imr_info->base_addr;
	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);

	if (imr_data->imr_enable)
		return;

	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_fw_ini_region_tlv *reg;

		if (!trans->dbg.active_regions[i])
			continue;

		reg = (void *)trans->dbg.active_regions[i]->data;

		/* We have only one DRAM IMR region, so we
		 * can break as soon as we find the first
		 * one.
		 */
		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
			trans->dbg.unsupported_region_msk |= BIT(i);
			break;
		}
	}
}

/* Data extracted from the alive notification for the caller of
 * iwl_mld_load_fw_wait_alive().
 */
struct iwl_mld_alive_data {
	/* SKU identifier reported by the firmware; passed to PNVM load */
	__le32 sku_id[3];
	/* true iff the firmware reported IWL_ALIVE_STATUS_OK */
	bool valid;
};

/*
 * Notification-wait handler for UCODE_ALIVE_NTFY.
 *
 * Validates the notification size against the API version (only v7/v8
 * are accepted), records IMR data and LMAC/UMAC error-table pointers in
 * the transport, copies the SKU ID and alive status into *data (a
 * struct iwl_mld_alive_data), and logs version information.
 *
 * Returns true when the notification was recognized and consumed, which
 * completes the wait; false to keep waiting.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	unsigned int expected_sz;
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_mld_alive_data *alive_data = data;
	struct iwl_alive_ntf *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	switch (version) {
	case 7:
		expected_sz = sizeof(struct iwl_alive_ntf_v7);
		break;
	case 8:
		expected_sz = sizeof(struct iwl_alive_ntf);
		break;
	default:
		return false;
	}

	if (pkt_len != expected_sz)
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	/* NOTE(review): lmac2 points at an array element and is therefore
	 * never NULL; the "if (lmac2)" checks below always take the branch.
	 */
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	BUILD_BUG_ON(sizeof(alive_data->sku_id) !=
		     sizeof(palive->sku_id.data));
	memcpy(alive_data->sku_id, palive->sku_id.data,
	       sizeof(palive->sku_id.data));

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     le32_to_cpu(alive_data->sku_id[0]),
		     le32_to_cpu(alive_data->sku_id[1]),
		     le32_to_cpu(alive_data->sku_id[2]));

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	/* mask off the cache-control bits to get the real address */
	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	iwl_fw_umac_set_alive_err_table(trans, umac_error_table);

	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
		     le16_to_cpu(palive->flags));

	/* platform_id only exists in the v8 notification layout */
	if (version >= 8)
		IWL_DEBUG_FW(mld, "platform_id 0x%llx\n",
			     le64_to_cpu(palive->platform_id));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}

/* How long to wait for the alive / init-complete notifications */
#define MLD_ALIVE_TIMEOUT		(2 * HZ)
#define MLD_INIT_COMPLETE_TIMEOUT	(2 * HZ)

/*
 * Dump diagnostic state when the alive notification wait fails:
 * secure-boot CPU status, a few WFPM/HPM/OTP registers, and the
 * firmware program counters collected in trans->dbg.
 */
static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}

/*
 * Start the firmware image and wait for the alive notification.
 *
 * On success, alive_data holds the SKU ID and validity flag extracted
 * by iwl_alive_fn(), and the transport has been told the firmware is
 * alive. On timeout, debug data is collected and diagnostic registers
 * are dumped. Returns 0 on success or a negative error code.
 */
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld,
				      struct iwl_mld_alive_data *alive_data)
{
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, alive_data);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, mld->fw, IWL_UCODE_REGULAR, true);
	if (ret) {
		/* the wait was registered above; drop it before bailing */
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);

	if (ret) {
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		/* NOTE(review): the register dump below runs for any wait
		 * error, not just -ETIMEDOUT — appears intentional.
		 */
		iwl_mld_print_alive_notif_timeout(mld);
		return ret;
	}

	if (!alive_data->valid) {
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		return -EIO;
	}

	iwl_trans_fw_alive(mld->trans);

	return 0;
}

/*
 * Run the firmware init sequence:
 *  1. start the image and wait for alive,
 *  2. load PNVM data (keyed by the SKU ID from the alive notification),
 *  3. send INIT_EXTENDED_CFG_CMD (PHY init) and the PHY config command,
 *  4. wait for INIT_COMPLETE_NOTIF.
 * Returns 0 on success or a negative error code.
 */
static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_PHY)),
	};
	struct iwl_mld_alive_data alive_data = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld, &alive_data);
	if (ret)
		return ret;

	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    mld->fw, alive_data.sku_id);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		return ret;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	ret = iwl_mld_send_phy_cfg_cmd(mld);
	if (ret) {
		IWL_ERR(mld, "Failed to send PHY config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Bring up the hardware and run the firmware init sequence; on success
 * mark the firmware as running. On failure the firmware/device is
 * stopped again before returning the error.
 */
int iwl_mld_load_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_trans_start_hw(mld->trans);
	if (ret)
		return ret;

	ret = iwl_mld_run_fw_init_sequence(mld);
	if (ret)
		goto err;

	mld->fw_status.running = true;

	return 0;
err:
	iwl_mld_stop_fw(mld);
	return ret;
}

/*
 * Stop the firmware and device: abort all pending notification waits,
 * stop debug collection, stop the device, then flush any async
 * notifications that raced with the shutdown.
 */
void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	iwl_abort_notification_waits(&mld->notif_wait);

	iwl_fw_dbg_stop_sync(&mld->fwrt);

	iwl_trans_stop_device(mld->trans);

	/* HW is stopped, no more coming RX. Cancel all notifications in
	 * case they were sent just before stopping the HW.
	 */
	iwl_mld_cancel_async_notifications(mld);

	mld->fw_status.running = false;
}

/* Interface iterator: force a disconnect on station interfaces after a
 * HW restart (used when the recovery blob was rejected by the firmware).
 */
static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_hw_restart_disconnect(vif);
}

/*
 * Send FW_ERROR_RECOVERY_CMD to the firmware.
 *
 * @flags: recovery flags; when ERROR_RECOVERY_UPDATE_DB is set, the
 * previously saved error-recovery buffer is attached (NOCOPY) and the
 * firmware's response is checked — a non-zero response means the blob
 * was invalid, in which case all station interfaces are disconnected.
 *
 * No-op if the firmware TLVs define no error log, or if UPDATE_DB was
 * requested but no recovery buffer was captured. The recovery buffer is
 * freed in all cases once the command has been attempted.
 */
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] = error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		/* firmware rejected the recovery blob — tear down
		 * station connections so mac80211 reconnects cleanly
		 */
		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}

/*
 * Post-alive firmware configuration: antennas, coex, latency, LARI,
 * thermal thresholds (and cTDP when CONFIG_THERMAL), RX queues, RSS,
 * scan chains, device power, recovery/time-sync on HW restart, LEDs,
 * and the various regulatory/SAR tables (PPAG, SAR, SGOM, TAS, UATS).
 * Returns 0 on success or the first failing step's error code.
 */
static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	if (mld->fw_status.in_hw_restart) {
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}

/*
 * Full firmware start: load + alive, then post-alive configuration and
 * MCC (regulatory) init. On any failure after the load, the firmware is
 * stopped again. Returns 0 on success or a negative error code.
 */
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		return ret;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}