// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */

#include "mld.h"

#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
#include "coex.h"
#include "regulatory.h"
#include "thermal.h"

static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
{
	struct iwl_tx_ant_cfg_cmd cmd;

	lockdep_assert_wiphy(mld->wiphy);

	cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));

	IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);

	return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
}

static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
{
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mld->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
}

static int iwl_mld_config_scan(struct iwl_mld *mld)
{
	struct iwl_scan_config cmd = {
		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
				    &cmd);
}

static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
				   const struct iwl_imr_alive_info *imr_info)
{
	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;

	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
	imr_data->imr_size = le32_to_cpu(imr_info->size);
	imr_data->imr2sram_remainbyte = imr_data->imr_size;
	imr_data->imr_base_addr = imr_info->base_addr;
	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);

	if (imr_data->imr_enable)
		return;

	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_fw_ini_region_tlv *reg;

		if (!trans->dbg.active_regions[i])
			continue;

		reg = (void *)trans->dbg.active_regions[i]->data;

		/* We have only one DRAM IMR region, so we
		 * can break as soon as we find the first
		 * one.
		 */
		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
			trans->dbg.unsupported_region_msk |= BIT(i);
			break;
		}
	}
}

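/* Handler for the ALIVE notification: checks the notification version (6 or
 * 7) and size, saves the SKU ID and the LMAC/UMAC error table pointers in
 * the transport, and reports through the data pointer (bool *alive_valid)
 * whether the firmware came up with IWL_ALIVE_STATUS_OK.
 */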
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_alive_ntf_v6 *palive;
	bool *alive_valid = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
	trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
	trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table >= trans->cfg->min_umac_error_event_table)
		iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
	else
		IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
			umac_error_table);

	*alive_valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	if (version >= 7)
		IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
			     le16_to_cpu(palive->flags));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}

#define MLD_ALIVE_TIMEOUT (2 * HZ)
#define MLD_INIT_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}

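/* Start the firmware image and wait up to MLD_ALIVE_TIMEOUT for the ALIVE
 * notification. On timeout, firmware debug data is collected; on any failure
 * the secure-boot status and firmware PC registers are printed to help
 * triage the problem.
 */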
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
{
	const struct fw_img *fw =
		iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	bool alive_valid = false;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_valid);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, fw, true);
	if (ret) {
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);

	if (ret) {
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		iwl_mld_print_alive_notif_timeout(mld);
		return ret;
	}

	if (!alive_valid) {
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		return -EIO;
	}

	iwl_trans_fw_alive(mld->trans, 0);

	return 0;
}

static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld);
	if (ret)
		return ret;

	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    &mld->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		return ret;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		return ret;
	}

	return 0;
}

int iwl_mld_load_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_trans_start_hw(mld->trans);
	if (ret)
		return ret;

	ret = iwl_mld_run_fw_init_sequence(mld);
	if (ret)
		goto err;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		goto err;

	mld->fw_status.running = true;

	return 0;
err:
	iwl_mld_stop_fw(mld);
	return ret;
}

void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	iwl_abort_notification_waits(&mld->notif_wait);

	iwl_fw_dbg_stop_sync(&mld->fwrt);

	iwl_trans_stop_device(mld->trans);

	/* HW is stopped, no more coming RX. Cancel all notifications in
	 * case they were sent just before stopping the HW.
	 */
	iwl_mld_cancel_async_notifications(mld);

	mld->fw_status.running = false;
}

static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_hw_restart_disconnect(vif);
}

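/* Send FW_ERROR_RECOVERY_CMD to the firmware. With ERROR_RECOVERY_UPDATE_DB
 * the recovery blob saved on the previous NIC error is attached to the
 * command; if the firmware reports the blob is invalid, all station
 * interfaces are forced to disconnect once the restart completes.
 */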
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] = error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}

static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	if (mld->fw_status.in_hw_restart) {
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}

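/* Load the firmware and apply the runtime configuration (antennas, RSS,
 * scan, power, etc.). If configuration fails after the firmware is up,
 * the firmware is stopped again.
 */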
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		return ret;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}