1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <net/mac80211.h> 8 #include <linux/netdevice.h> 9 #include <linux/dmi.h> 10 11 #include "iwl-trans.h" 12 #include "iwl-op-mode.h" 13 #include "fw/img.h" 14 #include "iwl-debug.h" 15 #include "iwl-prph.h" 16 #include "fw/acpi.h" 17 #include "fw/pnvm.h" 18 #include "fw/uefi.h" 19 #include "fw/regulatory.h" 20 21 #include "mvm.h" 22 #include "fw/dbg.h" 23 #include "iwl-phy-db.h" 24 #include "iwl-modparams.h" 25 #include "iwl-nvm-parse.h" 26 #include "time-sync.h" 27 28 #define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ) 29 #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) 30 31 #define IWL_UATS_VLP_AP_SUPPORTED BIT(29) 32 #define IWL_UATS_AFC_AP_SUPPORTED BIT(30) 33 34 struct iwl_mvm_alive_data { 35 bool valid; 36 u32 scd_base_addr; 37 }; 38 39 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) 40 { 41 struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { 42 .valid = cpu_to_le32(valid_tx_ant), 43 }; 44 45 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); 46 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, 47 sizeof(tx_ant_cmd), &tx_ant_cmd); 48 } 49 50 static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) 51 { 52 int i; 53 struct iwl_rss_config_cmd cmd = { 54 .flags = cpu_to_le32(IWL_RSS_ENABLE), 55 .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) | 56 BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) | 57 BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) | 58 BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) | 59 BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) | 60 BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), 61 }; 62 63 if (mvm->trans->num_rx_queues == 1) 64 return 0; 65 66 /* Do not direct RSS traffic to Q 0 which is our fallback queue */ 67 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) 68 cmd.indirection_table[i] = 69 1 + (i % (mvm->trans->num_rx_queues - 1)); 70 netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); 71 72 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 73 } 74 75 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) 76 { 77 struct iwl_dqa_enable_cmd dqa_cmd = { 78 .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), 79 }; 80 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD); 81 int ret; 82 83 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); 84 if (ret) 85 IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret); 86 else 87 IWL_DEBUG_FW(mvm, "Working in DQA mode\n"); 88 89 return ret; 90 } 91 92 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, 93 struct iwl_rx_cmd_buffer *rxb) 94 { 95 struct iwl_rx_packet *pkt = rxb_addr(rxb); 96 struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; 97 __le32 *dump_data = mfu_dump_notif->data; 98 int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); 99 int i; 100 101 if (mfu_dump_notif->index_num == 0) 102 IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", 103 le32_to_cpu(mfu_dump_notif->assert_id)); 104 105 for (i = 0; i < n_words; i++) 106 IWL_DEBUG_INFO(mvm, 107 "MFUART assert dump, dword %u: 0x%08x\n", 108 le16_to_cpu(mfu_dump_notif->index_num) * 109 n_words + i, 110 le32_to_cpu(dump_data[i])); 111 } 112 113 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, 114 struct iwl_rx_packet *pkt, void *data) 115 { 116 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); 117 struct iwl_mvm *mvm = 118 container_of(notif_wait, struct iwl_mvm, notif_wait); 119 struct iwl_mvm_alive_data *alive_data = data; 120 struct iwl_umac_alive *umac; 121 struct iwl_lmac_alive *lmac1; 122 struct iwl_lmac_alive *lmac2 = NULL; 123 u16 status; 124 u32 lmac_error_event_table, umac_error_table; 125 u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, 126 UCODE_ALIVE_NTFY, 0); 127 u32 i; 128 129 130 if (version == 6) { 131 struct iwl_alive_ntf_v6 *palive; 132 133 if (pkt_len < sizeof(*palive)) 134 return false; 135 136 palive = (void *)pkt->data; 137 mvm->trans->dbg.imr_data.imr_enable = 138 le32_to_cpu(palive->imr.enabled); 139 mvm->trans->dbg.imr_data.imr_size = 140 le32_to_cpu(palive->imr.size); 141 mvm->trans->dbg.imr_data.imr2sram_remainbyte = 142 mvm->trans->dbg.imr_data.imr_size; 143 mvm->trans->dbg.imr_data.imr_base_addr = 144 palive->imr.base_addr; 145 mvm->trans->dbg.imr_data.imr_curr_addr = 146 le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr); 147 IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n", 148 mvm->trans->dbg.imr_data.imr_enable, 149 mvm->trans->dbg.imr_data.imr_size, 150 le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr)); 151 152 if (!mvm->trans->dbg.imr_data.imr_enable) { 153 for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) { 154 struct iwl_ucode_tlv *reg_tlv; 155 struct iwl_fw_ini_region_tlv *reg; 156 157 reg_tlv = mvm->trans->dbg.active_regions[i]; 158 if (!reg_tlv) 159 continue; 160 161 reg = (void *)reg_tlv->data; 162 /* 163 * We have only one DRAM IMR region, so we 164 * can break as soon as we find the first 165 * one. 166 */ 167 if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) { 168 mvm->trans->dbg.unsupported_region_msk |= BIT(i); 169 break; 170 } 171 } 172 } 173 } 174 175 if (version >= 5) { 176 struct iwl_alive_ntf_v5 *palive; 177 178 if (pkt_len < sizeof(*palive)) 179 return false; 180 181 palive = (void *)pkt->data; 182 umac = &palive->umac_data; 183 lmac1 = &palive->lmac_data[0]; 184 lmac2 = &palive->lmac_data[1]; 185 status = le16_to_cpu(palive->status); 186 187 mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]); 188 mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]); 189 mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]); 190 191 IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n", 192 mvm->trans->sku_id[0], 193 mvm->trans->sku_id[1], 194 mvm->trans->sku_id[2]); 195 } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) { 196 struct iwl_alive_ntf_v4 *palive; 197 198 if (pkt_len < sizeof(*palive)) 199 return false; 200 201 palive = (void *)pkt->data; 202 umac = &palive->umac_data; 203 lmac1 = &palive->lmac_data[0]; 204 lmac2 = &palive->lmac_data[1]; 205 status = le16_to_cpu(palive->status); 206 } else if (iwl_rx_packet_payload_len(pkt) == 207 sizeof(struct iwl_alive_ntf_v3)) { 208 struct iwl_alive_ntf_v3 *palive3; 209 210 if (pkt_len < sizeof(*palive3)) 211 return false; 212 213 palive3 = (void *)pkt->data; 214 umac = &palive3->umac_data; 215 lmac1 = &palive3->lmac_data; 216 status = le16_to_cpu(palive3->status); 217 } else { 218 WARN(1, "unsupported alive notification (size %d)\n", 219 iwl_rx_packet_payload_len(pkt)); 220 /* get timeout later */ 221 return false; 222 } 223 224 lmac_error_event_table = 225 le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); 226 iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table); 227 228 if (lmac2) 229 mvm->trans->dbg.lmac_error_event_table[1] = 230 le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); 231 232 umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & 233 ~FW_ADDR_CACHE_CONTROL; 234 235 if (umac_error_table) { 236 if (umac_error_table >= 237 mvm->trans->cfg->min_umac_error_event_table) { 238 iwl_fw_umac_set_alive_err_table(mvm->trans, 239 umac_error_table); 240 } else { 241 IWL_ERR(mvm, 242 "Not valid error log pointer 0x%08X for %s uCode\n", 243 umac_error_table, 244 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? 245 "Init" : "RT"); 246 } 247 } 248 249 alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); 250 alive_data->valid = status == IWL_ALIVE_STATUS_OK; 251 252 IWL_DEBUG_FW(mvm, 253 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", 254 status, lmac1->ver_type, lmac1->ver_subtype); 255 256 if (lmac2) 257 IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); 258 259 IWL_DEBUG_FW(mvm, 260 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 261 le32_to_cpu(umac->umac_major), 262 le32_to_cpu(umac->umac_minor)); 263 264 iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac); 265 266 return true; 267 } 268 269 static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, 270 struct iwl_rx_packet *pkt, void *data) 271 { 272 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); 273 274 return true; 275 } 276 277 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, 278 struct iwl_rx_packet *pkt, void *data) 279 { 280 struct iwl_phy_db *phy_db = data; 281 282 if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { 283 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); 284 return true; 285 } 286 287 WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); 288 289 return false; 290 } 291 292 static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) 293 { 294 #define IWL_FW_PRINT_REG_INFO(reg_name) \ 295 IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name)) 296 297 struct iwl_trans *trans = mvm->trans; 298 enum iwl_device_family device_family = trans->trans_cfg->device_family; 299 300 if (device_family < IWL_DEVICE_FAMILY_8000) 301 return; 302 303 if (device_family <= IWL_DEVICE_FAMILY_9000) 304 IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION); 305 else 306 IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION); 307 308 IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE); 309 310 /* print OPT info */ 311 IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR); 312 IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA); 313 } 314 315 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, 316 enum iwl_ucode_type ucode_type) 317 { 318 struct iwl_notification_wait alive_wait; 319 struct iwl_mvm_alive_data alive_data = {}; 320 const struct fw_img *fw; 321 int ret; 322 enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; 323 static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; 324 bool run_in_rfkill = 325 ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); 326 u8 count; 327 struct iwl_pc_data *pc_data; 328 329 if (ucode_type == IWL_UCODE_REGULAR && 330 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && 331 !(fw_has_capa(&mvm->fw->ucode_capa, 332 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) 333 fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); 334 else 335 fw = iwl_get_ucode_image(mvm->fw, ucode_type); 336 if (WARN_ON(!fw)) 337 return -EINVAL; 338 iwl_fw_set_current_image(&mvm->fwrt, ucode_type); 339 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 340 341 iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, 342 alive_cmd, ARRAY_SIZE(alive_cmd), 343 iwl_alive_fn, &alive_data); 344 345 /* 346 * We want to load the INIT firmware even in RFKILL 347 * For the unified firmware case, the ucode_type is not 348 * INIT, but we still need to run it. 349 */ 350 ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill); 351 if (ret) { 352 iwl_fw_set_current_image(&mvm->fwrt, old_type); 353 iwl_remove_notification(&mvm->notif_wait, &alive_wait); 354 return ret; 355 } 356 357 /* 358 * Some things may run in the background now, but we 359 * just wait for the ALIVE notification here. 360 */ 361 ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, 362 MVM_UCODE_ALIVE_TIMEOUT); 363 364 if (mvm->trans->trans_cfg->device_family == 365 IWL_DEVICE_FAMILY_AX210) { 366 /* print these registers regardless of alive fail/success */ 367 IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n", 368 iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION)); 369 IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n", 370 iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION)); 371 IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n", 372 iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG)); 373 IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n", 374 iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9)); 375 } 376 377 if (ret) { 378 struct iwl_trans *trans = mvm->trans; 379 380 /* SecBoot info */ 381 if (trans->trans_cfg->device_family >= 382 IWL_DEVICE_FAMILY_22000) { 383 IWL_ERR(mvm, 384 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 385 iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), 386 iwl_read_umac_prph(trans, 387 UMAG_SB_CPU_2_STATUS)); 388 } else if (trans->trans_cfg->device_family >= 389 IWL_DEVICE_FAMILY_8000) { 390 IWL_ERR(mvm, 391 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 392 iwl_read_prph(trans, SB_CPU_1_STATUS), 393 iwl_read_prph(trans, SB_CPU_2_STATUS)); 394 } 395 396 iwl_mvm_print_pd_notification(mvm); 397 398 /* LMAC/UMAC PC info */ 399 if (trans->trans_cfg->device_family >= 400 IWL_DEVICE_FAMILY_22000) { 401 pc_data = trans->dbg.pc_data; 402 for (count = 0; count < trans->dbg.num_pc; 403 count++, pc_data++) 404 IWL_ERR(mvm, "%s: 0x%x\n", 405 pc_data->pc_name, 406 pc_data->pc_address); 407 } else if (trans->trans_cfg->device_family >= 408 IWL_DEVICE_FAMILY_9000) { 409 IWL_ERR(mvm, "UMAC PC: 0x%x\n", 410 iwl_read_umac_prph(trans, 411 UREG_UMAC_CURRENT_PC)); 412 IWL_ERR(mvm, "LMAC PC: 0x%x\n", 413 iwl_read_umac_prph(trans, 414 UREG_LMAC1_CURRENT_PC)); 415 if (iwl_mvm_is_cdb_supported(mvm)) 416 IWL_ERR(mvm, "LMAC2 PC: 0x%x\n", 417 iwl_read_umac_prph(trans, 418 UREG_LMAC2_CURRENT_PC)); 419 } 420 421 if (ret == -ETIMEDOUT && !mvm->pldr_sync) 422 iwl_fw_dbg_error_collect(&mvm->fwrt, 423 FW_DBG_TRIGGER_ALIVE_TIMEOUT); 424 425 iwl_fw_set_current_image(&mvm->fwrt, old_type); 426 return ret; 427 } 428 429 if (!alive_data.valid) { 430 IWL_ERR(mvm, "Loaded ucode is not valid!\n"); 431 iwl_fw_set_current_image(&mvm->fwrt, old_type); 432 return -EIO; 433 } 434 435 /* if reached this point, Alive notification was received */ 436 iwl_mei_alive_notif(true); 437 438 ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, 439 &mvm->fw->ucode_capa); 440 if (ret) { 441 IWL_ERR(mvm, "Timeout waiting for PNVM load!\n"); 442 iwl_fw_set_current_image(&mvm->fwrt, old_type); 443 return ret; 444 } 445 446 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); 447 448 /* 449 * Note: all the queues are enabled as part of the interface 450 * initialization, but in firmware restart scenarios they 451 * could be stopped, so wake them up. In firmware restart, 452 * mac80211 will have the queues stopped as well until the 453 * reconfiguration completes. During normal startup, they 454 * will be empty. 455 */ 456 457 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); 458 /* 459 * Set a 'fake' TID for the command queue, since we use the 460 * hweight() of the tid_bitmap as a refcount now. Not that 461 * we ever even consider the command queue as one we might 462 * want to reuse, but be safe nevertheless. 463 */ 464 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = 465 BIT(IWL_MAX_TID_COUNT + 2); 466 467 set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 468 #ifdef CONFIG_IWLWIFI_DEBUGFS 469 iwl_fw_set_dbg_rec_on(&mvm->fwrt); 470 #endif 471 472 /* 473 * All the BSSes in the BSS table include the GP2 in the system 474 * at the beacon Rx time, this is of course no longer relevant 475 * since we are resetting the firmware. 476 * Purge all the BSS table. 477 */ 478 cfg80211_bss_flush(mvm->hw->wiphy); 479 480 return 0; 481 } 482 483 static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, 484 struct iwl_phy_specific_cfg *phy_filters) 485 { 486 #ifdef CONFIG_ACPI 487 *phy_filters = mvm->phy_filters; 488 #endif /* CONFIG_ACPI */ 489 } 490 491 static void iwl_mvm_uats_init(struct iwl_mvm *mvm) 492 { 493 u8 cmd_ver; 494 int ret; 495 struct iwl_host_cmd cmd = { 496 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, 497 UATS_TABLE_CMD), 498 .flags = 0, 499 .data[0] = &mvm->fwrt.uats_table, 500 .len[0] = sizeof(mvm->fwrt.uats_table), 501 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 502 }; 503 504 if (!(mvm->trans->trans_cfg->device_family >= 505 IWL_DEVICE_FAMILY_AX210)) { 506 IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); 507 return; 508 } 509 510 if (!mvm->fwrt.uats_enabled) { 511 IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n"); 512 return; 513 } 514 515 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, 516 IWL_FW_CMD_VER_UNKNOWN); 517 if (cmd_ver != 1) { 518 IWL_DEBUG_RADIO(mvm, 519 "UATS_TABLE_CMD ver %d not supported\n", 520 cmd_ver); 521 return; 522 } 523 524 ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); 525 if (ret < 0) { 526 IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret); 527 return; 528 } 529 530 ret = iwl_mvm_send_cmd(mvm, &cmd); 531 if (ret < 0) 532 IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret); 533 else 534 IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n"); 535 } 536 537 static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) 538 { 539 u8 cmd_ver; 540 int ret; 541 struct iwl_host_cmd cmd = { 542 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, 543 SAR_OFFSET_MAPPING_TABLE_CMD), 544 .flags = 0, 545 .data[0] = &mvm->fwrt.sgom_table, 546 .len[0] = sizeof(mvm->fwrt.sgom_table), 547 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 548 }; 549 550 if (!mvm->fwrt.sgom_enabled) { 551 IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); 552 return 0; 553 } 554 555 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, 556 IWL_FW_CMD_VER_UNKNOWN); 557 558 if (cmd_ver != 2) { 559 IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n", 560 cmd_ver); 561 return 0; 562 } 563 564 ret = iwl_mvm_send_cmd(mvm, &cmd); 565 if (ret < 0) 566 IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); 567 568 return ret; 569 } 570 571 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) 572 { 573 u32 cmd_id = PHY_CONFIGURATION_CMD; 574 struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; 575 enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; 576 u8 cmd_ver; 577 size_t cmd_size; 578 579 if (iwl_mvm_has_unified_ucode(mvm) && 580 !mvm->trans->cfg->tx_with_siso_diversity) 581 return 0; 582 583 if (mvm->trans->cfg->tx_with_siso_diversity) { 584 /* 585 * TODO: currently we don't set the antenna but letting the NIC 586 * to decide which antenna to use. This should come from BIOS. 587 */ 588 phy_cfg_cmd.phy_cfg = 589 cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); 590 } 591 592 /* Set parameters */ 593 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); 594 595 /* set flags extra PHY configuration flags from the device's cfg */ 596 phy_cfg_cmd.phy_cfg |= 597 cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); 598 599 phy_cfg_cmd.calib_control.event_trigger = 600 mvm->fw->default_calib[ucode_type].event_trigger; 601 phy_cfg_cmd.calib_control.flow_trigger = 602 mvm->fw->default_calib[ucode_type].flow_trigger; 603 604 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 605 IWL_FW_CMD_VER_UNKNOWN); 606 if (cmd_ver >= 3) 607 iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg); 608 609 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", 610 phy_cfg_cmd.phy_cfg); 611 cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) : 612 sizeof(struct iwl_phy_cfg_cmd_v1); 613 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); 614 } 615 616 static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) 617 { 618 struct iwl_notification_wait init_wait; 619 struct iwl_nvm_access_complete_cmd nvm_complete = {}; 620 struct iwl_init_extended_cfg_cmd init_cfg = { 621 .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), 622 }; 623 static const u16 init_complete[] = { 624 INIT_COMPLETE_NOTIF, 625 }; 626 u32 sb_cfg; 627 int ret; 628 629 if (mvm->trans->cfg->tx_with_siso_diversity) 630 init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY)); 631 632 lockdep_assert_held(&mvm->mutex); 633 634 mvm->rfkill_safe_init_done = false; 635 636 if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { 637 sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); 638 /* if needed, we'll reset this on our way out later */ 639 mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM; 640 if (mvm->pldr_sync && iwl_mei_pldr_req()) 641 return -EBUSY; 642 } 643 644 iwl_init_notification_wait(&mvm->notif_wait, 645 &init_wait, 646 init_complete, 647 ARRAY_SIZE(init_complete), 648 iwl_wait_init_complete, 649 NULL); 650 651 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); 652 653 /* Will also start the device */ 654 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); 655 if (ret) { 656 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); 657 658 /* if we needed reset then fail here, but notify and remove */ 659 if (mvm->pldr_sync) { 660 iwl_mei_alive_notif(false); 661 iwl_trans_pcie_remove(mvm->trans, true); 662 } 663 664 goto error; 665 } 666 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, 667 NULL); 668 669 if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ) 670 mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans, 671 CNVI_PMU_STEP_FLOW) & 672 CNVI_PMU_STEP_FLOW_FORCE_URM); 673 674 /* Send init config command to mark that we are sending NVM access 675 * commands 676 */ 677 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, 678 INIT_EXTENDED_CFG_CMD), 679 CMD_SEND_IN_RFKILL, 680 sizeof(init_cfg), &init_cfg); 681 if (ret) { 682 IWL_ERR(mvm, "Failed to run init config command: %d\n", 683 ret); 684 goto error; 685 } 686 687 /* Load NVM to NIC if needed */ 688 if (mvm->nvm_file_name) { 689 ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, 690 mvm->nvm_sections); 691 if (ret) 692 goto error; 693 ret = iwl_mvm_load_nvm_to_nic(mvm); 694 if (ret) 695 goto error; 696 } 697 698 if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) { 699 ret = iwl_nvm_init(mvm); 700 if (ret) { 701 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 702 goto error; 703 } 704 } 705 706 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, 707 NVM_ACCESS_COMPLETE), 708 CMD_SEND_IN_RFKILL, 709 sizeof(nvm_complete), &nvm_complete); 710 if (ret) { 711 IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", 712 ret); 713 goto error; 714 } 715 716 ret = iwl_send_phy_cfg_cmd(mvm); 717 if (ret) { 718 IWL_ERR(mvm, "Failed to run PHY configuration: %d\n", 719 ret); 720 goto error; 721 } 722 723 /* We wait for the INIT complete notification */ 724 ret = iwl_wait_notification(&mvm->notif_wait, &init_wait, 725 MVM_UCODE_ALIVE_TIMEOUT); 726 if (ret) 727 return ret; 728 729 /* Read the NVM only at driver load time, no need to do this twice */ 730 if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { 731 mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw, 732 mvm->set_tx_ant, mvm->set_rx_ant); 733 if (IS_ERR(mvm->nvm_data)) { 734 ret = PTR_ERR(mvm->nvm_data); 735 mvm->nvm_data = NULL; 736 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 737 return ret; 738 } 739 } 740 741 mvm->rfkill_safe_init_done = true; 742 743 return 0; 744 745 error: 746 iwl_remove_notification(&mvm->notif_wait, &init_wait); 747 return ret; 748 } 749 750 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) 751 { 752 struct iwl_notification_wait calib_wait; 753 static const u16 init_complete[] = { 754 INIT_COMPLETE_NOTIF, 755 CALIB_RES_NOTIF_PHY_DB 756 }; 757 int ret; 758 759 if (iwl_mvm_has_unified_ucode(mvm)) 760 return iwl_run_unified_mvm_ucode(mvm); 761 762 lockdep_assert_held(&mvm->mutex); 763 764 mvm->rfkill_safe_init_done = false; 765 766 iwl_init_notification_wait(&mvm->notif_wait, 767 &calib_wait, 768 init_complete, 769 ARRAY_SIZE(init_complete), 770 iwl_wait_phy_db_entry, 771 mvm->phy_db); 772 773 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); 774 775 /* Will also start the device */ 776 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); 777 if (ret) { 778 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); 779 goto remove_notif; 780 } 781 782 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) { 783 ret = iwl_mvm_send_bt_init_conf(mvm); 784 if (ret) 785 goto remove_notif; 786 } 787 788 /* Read the NVM only at driver load time, no need to do this twice */ 789 if (!mvm->nvm_data) { 790 ret = iwl_nvm_init(mvm); 791 if (ret) { 792 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 793 goto remove_notif; 794 } 795 } 796 797 /* In case we read the NVM from external file, load it to the NIC */ 798 if (mvm->nvm_file_name) { 799 ret = iwl_mvm_load_nvm_to_nic(mvm); 800 if (ret) 801 goto remove_notif; 802 } 803 804 WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver, 805 "Too old NVM version (0x%0x, required = 0x%0x)", 806 mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver); 807 808 /* 809 * abort after reading the nvm in case RF Kill is on, we will complete 810 * the init seq later when RF kill will switch to off 811 */ 812 if (iwl_mvm_is_radio_hw_killed(mvm)) { 813 IWL_DEBUG_RF_KILL(mvm, 814 "jump over all phy activities due to RF kill\n"); 815 goto remove_notif; 816 } 817 818 mvm->rfkill_safe_init_done = true; 819 820 /* Send TX valid antennas before triggering calibrations */ 821 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); 822 if (ret) 823 goto remove_notif; 824 825 ret = iwl_send_phy_cfg_cmd(mvm); 826 if (ret) { 827 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", 828 ret); 829 goto remove_notif; 830 } 831 832 /* 833 * Some things may run in the background now, but we 834 * just wait for the calibration complete notification. 835 */ 836 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, 837 MVM_UCODE_CALIB_TIMEOUT); 838 if (!ret) 839 goto out; 840 841 if (iwl_mvm_is_radio_hw_killed(mvm)) { 842 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); 843 ret = 0; 844 } else { 845 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", 846 ret); 847 } 848 849 goto out; 850 851 remove_notif: 852 iwl_remove_notification(&mvm->notif_wait, &calib_wait); 853 out: 854 mvm->rfkill_safe_init_done = false; 855 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { 856 /* we want to debug INIT and we have no NVM - fake */ 857 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + 858 sizeof(struct ieee80211_channel) + 859 sizeof(struct ieee80211_rate), 860 GFP_KERNEL); 861 if (!mvm->nvm_data) 862 return -ENOMEM; 863 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; 864 mvm->nvm_data->bands[0].n_channels = 1; 865 mvm->nvm_data->bands[0].n_bitrates = 1; 866 mvm->nvm_data->bands[0].bitrates = 867 (void *)(mvm->nvm_data->channels + 1); 868 mvm->nvm_data->bands[0].bitrates->hw_value = 10; 869 } 870 871 return ret; 872 } 873 874 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) 875 { 876 struct iwl_ltr_config_cmd cmd = { 877 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), 878 }; 879 880 if (!mvm->trans->ltr_enabled) 881 return 0; 882 883 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, 884 sizeof(cmd), &cmd); 885 } 886 887 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) 888 { 889 u32 cmd_id = REDUCE_TX_POWER_CMD; 890 struct iwl_dev_tx_power_cmd cmd = { 891 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), 892 }; 893 __le16 *per_chain; 894 int ret; 895 u16 len = 0; 896 u32 n_subbands; 897 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 898 IWL_FW_CMD_VER_UNKNOWN); 899 if (cmd_ver == 7) { 900 len = sizeof(cmd.v7); 901 n_subbands = IWL_NUM_SUB_BANDS_V2; 902 per_chain = cmd.v7.per_chain[0][0]; 903 cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags); 904 } else if (cmd_ver == 6) { 905 len = sizeof(cmd.v6); 906 n_subbands = IWL_NUM_SUB_BANDS_V2; 907 per_chain = cmd.v6.per_chain[0][0]; 908 } else if (fw_has_api(&mvm->fw->ucode_capa, 909 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) { 910 len = sizeof(cmd.v5); 911 n_subbands = IWL_NUM_SUB_BANDS_V1; 912 per_chain = cmd.v5.per_chain[0][0]; 913 } else if (fw_has_capa(&mvm->fw->ucode_capa, 914 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) { 915 len = sizeof(cmd.v4); 916 n_subbands = IWL_NUM_SUB_BANDS_V1; 917 per_chain = cmd.v4.per_chain[0][0]; 918 } else { 919 len = sizeof(cmd.v3); 920 n_subbands = IWL_NUM_SUB_BANDS_V1; 921 per_chain = cmd.v3.per_chain[0][0]; 922 } 923 924 /* all structs have the same common part, add it */ 925 len += sizeof(cmd.common); 926 927 ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain, 928 IWL_NUM_CHAIN_TABLES, 929 n_subbands, prof_a, prof_b); 930 931 /* return on error or if the profile is disabled (positive number) */ 932 if (ret) 933 return ret; 934 935 iwl_mei_set_power_limit(per_chain); 936 937 IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); 938 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); 939 } 940 941 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) 942 { 943 union iwl_geo_tx_power_profiles_cmd geo_tx_cmd; 944 struct iwl_geo_tx_power_profiles_resp *resp; 945 u16 len; 946 int ret; 947 struct iwl_host_cmd cmd = { 948 .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD), 949 .flags = CMD_WANT_SKB, 950 .data = { &geo_tx_cmd }, 951 }; 952 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, 953 IWL_FW_CMD_VER_UNKNOWN); 954 955 /* the ops field is at the same spot for all versions, so set in v1 */ 956 geo_tx_cmd.v1.ops = 957 cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); 958 959 if (cmd_ver == 5) 960 len = sizeof(geo_tx_cmd.v5); 961 else if (cmd_ver == 4) 962 len = sizeof(geo_tx_cmd.v4); 963 else if (cmd_ver == 3) 964 len = sizeof(geo_tx_cmd.v3); 965 else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, 966 IWL_UCODE_TLV_API_SAR_TABLE_VER)) 967 len = sizeof(geo_tx_cmd.v2); 968 else 969 len = sizeof(geo_tx_cmd.v1); 970 971 if (!iwl_sar_geo_support(&mvm->fwrt)) 972 return -EOPNOTSUPP; 973 974 cmd.len[0] = len; 975 976 ret = iwl_mvm_send_cmd(mvm, &cmd); 977 if (ret) { 978 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); 979 return ret; 980 } 981 982 resp = (void *)cmd.resp_pkt->data; 983 ret = le32_to_cpu(resp->profile_idx); 984 985 if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM)) 986 ret = -EIO; 987 988 iwl_free_resp(&cmd); 989 return ret; 990 } 991 992 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) 993 { 994 u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); 995 union iwl_geo_tx_power_profiles_cmd cmd; 996 u16 len; 997 u32 n_bands; 998 u32 n_profiles; 999 __le32 sk = cpu_to_le32(0); 1000 int ret; 1001 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1002 IWL_FW_CMD_VER_UNKNOWN); 1003 1004 BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) != 1005 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) || 1006 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) != 1007 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) || 1008 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) != 1009 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) || 1010 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) != 1011 offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops)); 1012 1013 /* the ops field is at the same spot for all versions, so set in v1 */ 1014 cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); 1015 1016 /* Only set to South Korea if the table revision is 1 */ 1017 if (mvm->fwrt.geo_rev == 1) 1018 sk = cpu_to_le32(1); 1019 1020 if (cmd_ver == 5) { 1021 len = sizeof(cmd.v5); 1022 n_bands = ARRAY_SIZE(cmd.v5.table[0]); 1023 n_profiles = BIOS_GEO_MAX_PROFILE_NUM; 1024 cmd.v5.table_revision = sk; 1025 } else if (cmd_ver == 4) { 1026 len = sizeof(cmd.v4); 1027 n_bands = ARRAY_SIZE(cmd.v4.table[0]); 1028 n_profiles = BIOS_GEO_MAX_PROFILE_NUM; 1029 cmd.v4.table_revision = sk; 1030 } else if (cmd_ver == 3) { 1031 len = sizeof(cmd.v3); 1032 n_bands = ARRAY_SIZE(cmd.v3.table[0]); 1033 n_profiles = BIOS_GEO_MIN_PROFILE_NUM; 1034 cmd.v3.table_revision = sk; 1035 } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, 1036 IWL_UCODE_TLV_API_SAR_TABLE_VER)) { 1037 len = sizeof(cmd.v2); 1038 n_bands = ARRAY_SIZE(cmd.v2.table[0]); 1039 n_profiles = BIOS_GEO_MIN_PROFILE_NUM; 1040 cmd.v2.table_revision = sk; 1041 } else { 1042 len = sizeof(cmd.v1); 1043 n_bands = ARRAY_SIZE(cmd.v1.table[0]); 1044 n_profiles = BIOS_GEO_MIN_PROFILE_NUM; 1045 } 1046 1047 BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) != 1048 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) || 1049 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) != 1050 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) || 1051 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) != 1052 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) || 1053 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) != 1054 offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table)); 1055 /* the table is at the same position for all versions, so set use v1 */ 1056 ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0], 1057 n_bands, n_profiles); 1058 1059 /* 1060 * It is a valid scenario to not support SAR, or miss wgds table, 1061 * but in that case there is no need to send the command. 1062 */ 1063 if (ret) 1064 return 0; 1065 1066 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); 1067 } 1068 1069 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) 1070 { 1071 union iwl_ppag_table_cmd cmd; 1072 int ret, cmd_size; 1073 1074 ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size); 1075 /* Not supporting PPAG table is a valid scenario */ 1076 if (ret < 0) 1077 return 0; 1078 1079 IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); 1080 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, 1081 PER_PLATFORM_ANT_GAIN_CMD), 1082 0, cmd_size, &cmd); 1083 if (ret < 0) 1084 IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", 1085 ret); 1086 1087 return ret; 1088 } 1089 1090 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) 1091 { 1092 /* no need to read the table, done in INIT stage */ 1093 if (!(iwl_is_ppag_approved(&mvm->fwrt))) 1094 return 0; 1095 1096 return iwl_mvm_ppag_send_cmd(mvm); 1097 } 1098 1099 static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc) 1100 { 1101 int i; 1102 u32 size = le32_to_cpu(*le_size); 1103 1104 /* Verify that there is room for another country */ 1105 if (size >= IWL_WTAS_BLACK_LIST_MAX) 1106 return false; 1107 1108 for (i = 0; i < size; i++) { 1109 if (list[i] == cpu_to_le32(mcc)) 1110 return true; 1111 } 1112 1113 list[size++] = cpu_to_le32(mcc); 1114 *le_size = cpu_to_le32(size); 1115 return true; 1116 } 1117 1118 static void iwl_mvm_tas_init(struct iwl_mvm *mvm) 1119 { 1120 u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); 1121 int ret; 1122 struct iwl_tas_data data = {}; 1123 struct iwl_tas_config_cmd cmd = {}; 1124 int cmd_size, fw_ver; 1125 1126 BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) != 1127 IWL_WTAS_BLACK_LIST_MAX); 1128 BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) != 1129 IWL_WTAS_BLACK_LIST_MAX); 1130 1131 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { 1132 IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n"); 1133 return; 1134 } 1135 1136 ret = iwl_bios_get_tas_table(&mvm->fwrt, &data); 1137 if (ret < 0) { 1138 IWL_DEBUG_RADIO(mvm, 1139 "TAS table invalid or unavailable. (%d)\n", 1140 ret); 1141 return; 1142 } 1143 1144 if (ret == 0) 1145 return; 1146 1147 if (!iwl_is_tas_approved()) { 1148 IWL_DEBUG_RADIO(mvm, 1149 "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", 1150 dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); 1151 if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array, 1152 &data.block_list_size, 1153 IWL_MCC_US)) || 1154 (!iwl_mvm_add_to_tas_block_list(data.block_list_array, 1155 &data.block_list_size, 1156 IWL_MCC_CANADA))) { 1157 IWL_DEBUG_RADIO(mvm, 1158 "Unable to add US/Canada to TAS block list, disabling TAS\n"); 1159 return; 1160 } 1161 } else { 1162 IWL_DEBUG_RADIO(mvm, 1163 "System vendor '%s' is in the approved list.\n", 1164 dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); 1165 } 1166 1167 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1168 IWL_FW_CMD_VER_UNKNOWN); 1169 1170 memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common)); 1171 1172 /* Set v3 or v4 specific parts. will be trunctated for fw_ver < 3 */ 1173 if (fw_ver == 4) { 1174 cmd.v4.override_tas_iec = data.override_tas_iec; 1175 cmd.v4.enable_tas_iec = data.enable_tas_iec; 1176 cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed; 1177 } else { 1178 cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec); 1179 cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec); 1180 } 1181 1182 cmd_size = sizeof(struct iwl_tas_config_cmd_common); 1183 if (fw_ver >= 3) 1184 /* v4 is the same size as v3 */ 1185 cmd_size += sizeof(struct iwl_tas_config_cmd_v3); 1186 1187 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); 1188 if (ret < 0) 1189 IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); 1190 } 1191 1192 static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) 1193 { 1194 u32 value = 0; 1195 /* default behaviour is disabled */ 1196 bool bios_enable_rfi = false; 1197 int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value); 1198 1199 1200 if (ret < 0) { 1201 IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret); 1202 return bios_enable_rfi; 1203 } 1204 1205 value &= DSM_VALUE_RFI_DISABLE; 1206 /* RFI BIOS CONFIG value can be 0 or 3 only. 1207 * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled. 1208 * 1 and 2 are invalid BIOS configurations, So, it's not possible to 1209 * disable ddr/dlvr separately. 1210 */ 1211 if (!value) { 1212 IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n"); 1213 bios_enable_rfi = true; 1214 } else if (value == DSM_VALUE_RFI_DISABLE) { 1215 IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n"); 1216 } else { 1217 IWL_DEBUG_RADIO(mvm, 1218 "DSM RFI got invalid value, value=%d\n", value); 1219 } 1220 1221 return bios_enable_rfi; 1222 } 1223 1224 static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) 1225 { 1226 int ret; 1227 u32 value; 1228 struct iwl_lari_config_change_cmd_v7 cmd = {}; 1229 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, 1230 WIDE_ID(REGULATORY_AND_NVM_GROUP, 1231 LARI_CONFIG_CHANGE), 1); 1232 1233 cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt); 1234 1235 ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value); 1236 if (!ret) 1237 cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); 1238 1239 ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value); 1240 if (!ret) 1241 cmd.oem_unii4_allow_bitmap = cpu_to_le32(value); 1242 1243 ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value); 1244 if (!ret) { 1245 if (cmd_ver < 8) 1246 value &= ~ACTIVATE_5G2_IN_WW_MASK; 1247 cmd.chan_state_active_bitmap = cpu_to_le32(value); 1248 } 1249 1250 ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value); 1251 if (!ret) 1252 cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); 1253 1254 ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, 1255 &value); 1256 if (!ret) 1257 cmd.force_disable_channels_bitmap = cpu_to_le32(value); 1258 1259 ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD, 1260 &value); 1261 if (!ret) 1262 cmd.edt_bitmap = cpu_to_le32(value); 1263 1264 if (cmd.config_bitmap || 1265 cmd.oem_uhb_allow_bitmap || 1266 cmd.oem_11ax_allow_bitmap || 1267 cmd.oem_unii4_allow_bitmap || 1268 cmd.chan_state_active_bitmap || 1269 cmd.force_disable_channels_bitmap || 1270 cmd.edt_bitmap) { 1271 size_t cmd_size; 1272 1273 switch (cmd_ver) { 1274 case 8: 1275 case 7: 1276 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); 1277 break; 1278 case 6: 1279 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); 1280 break; 1281 case 5: 1282 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); 1283 break; 1284 case 4: 1285 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); 1286 break; 1287 case 3: 1288 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); 1289 break; 1290 case 2: 1291 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); 1292 break; 1293 default: 1294 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); 1295 break; 1296 } 1297 1298 IWL_DEBUG_RADIO(mvm, 1299 "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", 1300 le32_to_cpu(cmd.config_bitmap), 1301 le32_to_cpu(cmd.oem_11ax_allow_bitmap)); 1302 IWL_DEBUG_RADIO(mvm, 1303 "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", 1304 le32_to_cpu(cmd.oem_unii4_allow_bitmap), 1305 le32_to_cpu(cmd.chan_state_active_bitmap), 1306 cmd_ver); 1307 IWL_DEBUG_RADIO(mvm, 1308 "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", 1309 le32_to_cpu(cmd.oem_uhb_allow_bitmap), 1310 le32_to_cpu(cmd.force_disable_channels_bitmap)); 1311 IWL_DEBUG_RADIO(mvm, 1312 "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n", 1313 le32_to_cpu(cmd.edt_bitmap)); 1314 ret = iwl_mvm_send_cmd_pdu(mvm, 1315 WIDE_ID(REGULATORY_AND_NVM_GROUP, 1316 LARI_CONFIG_CHANGE), 1317 0, cmd_size, &cmd); 1318 if (ret < 0) 1319 IWL_DEBUG_RADIO(mvm, 1320 "Failed to send LARI_CONFIG_CHANGE (%d)\n", 1321 ret); 1322 } 1323 1324 if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED || 1325 le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED) 1326 mvm->fwrt.uats_enabled = true; 1327 } 1328 1329 void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm) 1330 { 1331 int ret; 1332 1333 iwl_acpi_get_guid_lock_status(&mvm->fwrt); 1334 1335 /* read PPAG table */ 1336 ret = iwl_bios_get_ppag_table(&mvm->fwrt); 1337 if (ret < 0) { 1338 IWL_DEBUG_RADIO(mvm, 1339 "PPAG BIOS table invalid or unavailable. (%d)\n", 1340 ret); 1341 } 1342 1343 /* read SAR tables */ 1344 ret = iwl_bios_get_wrds_table(&mvm->fwrt); 1345 if (ret < 0) { 1346 IWL_DEBUG_RADIO(mvm, 1347 "WRDS SAR BIOS table invalid or unavailable. (%d)\n", 1348 ret); 1349 /* 1350 * If not available, don't fail and don't bother with EWRD and 1351 * WGDS */ 1352 1353 if (!iwl_bios_get_wgds_table(&mvm->fwrt)) { 1354 /* 1355 * If basic SAR is not available, we check for WGDS, 1356 * which should *not* be available either. If it is 1357 * available, issue an error, because we can't use SAR 1358 * Geo without basic SAR. 1359 */ 1360 IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); 1361 } 1362 1363 } else { 1364 ret = iwl_bios_get_ewrd_table(&mvm->fwrt); 1365 /* if EWRD is not available, we can still use 1366 * WRDS, so don't fail */ 1367 if (ret < 0) 1368 IWL_DEBUG_RADIO(mvm, 1369 "EWRD SAR BIOS table invalid or unavailable. (%d)\n", 1370 ret); 1371 1372 /* read geo SAR table */ 1373 if (iwl_sar_geo_support(&mvm->fwrt)) { 1374 ret = iwl_bios_get_wgds_table(&mvm->fwrt); 1375 if (ret < 0) 1376 IWL_DEBUG_RADIO(mvm, 1377 "Geo SAR BIOS table invalid or unavailable. (%d)\n", 1378 ret); 1379 /* we don't fail if the table is not available */ 1380 } 1381 } 1382 1383 iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters); 1384 1385 if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid)) 1386 IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n"); 1387 } 1388 1389 static void iwl_mvm_disconnect_iterator(void *data, u8 *mac, 1390 struct ieee80211_vif *vif) 1391 { 1392 if (vif->type == NL80211_IFTYPE_STATION) 1393 ieee80211_hw_restart_disconnect(vif); 1394 } 1395 1396 void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) 1397 { 1398 u32 error_log_size = mvm->fw->ucode_capa.error_log_size; 1399 int ret; 1400 u32 resp; 1401 1402 struct iwl_fw_error_recovery_cmd recovery_cmd = { 1403 .flags = cpu_to_le32(flags), 1404 .buf_size = 0, 1405 }; 1406 struct iwl_host_cmd host_cmd = { 1407 .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), 1408 .flags = CMD_WANT_SKB, 1409 .data = {&recovery_cmd, }, 1410 .len = {sizeof(recovery_cmd), }, 1411 }; 1412 1413 /* no error log was defined in TLV */ 1414 if (!error_log_size) 1415 return; 1416 1417 if (flags & ERROR_RECOVERY_UPDATE_DB) { 1418 /* no buf was allocated while HW reset */ 1419 if (!mvm->error_recovery_buf) 1420 return; 1421 1422 host_cmd.data[1] = mvm->error_recovery_buf; 1423 host_cmd.len[1] = error_log_size; 1424 host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; 1425 recovery_cmd.buf_size = cpu_to_le32(error_log_size); 1426 } 1427 1428 ret = iwl_mvm_send_cmd(mvm, &host_cmd); 1429 kfree(mvm->error_recovery_buf); 1430 mvm->error_recovery_buf = NULL; 1431 1432 if (ret) { 1433 IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret); 1434 return; 1435 } 1436 1437 /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */ 1438 if (flags & ERROR_RECOVERY_UPDATE_DB) { 1439 resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); 1440 if (resp) { 1441 IWL_ERR(mvm, 1442 "Failed to send recovery cmd blob was invalid %d\n", 1443 resp); 1444 1445 ieee80211_iterate_interfaces(mvm->hw, 0, 1446 iwl_mvm_disconnect_iterator, 1447 mvm); 1448 } 1449 } 1450 } 1451 1452 static int iwl_mvm_sar_init(struct iwl_mvm *mvm) 1453 { 1454 return iwl_mvm_sar_select_profile(mvm, 1, 1); 1455 } 1456 1457 static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) 1458 { 1459 int ret; 1460 1461 if (iwl_mvm_has_unified_ucode(mvm)) 1462 return iwl_run_unified_mvm_ucode(mvm); 1463 1464 ret = iwl_run_init_mvm_ucode(mvm); 1465 1466 if (ret) { 1467 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); 1468 1469 if (iwlmvm_mod_params.init_dbg) 1470 return 0; 1471 return ret; 1472 } 1473 1474 iwl_fw_dbg_stop_sync(&mvm->fwrt); 1475 iwl_trans_stop_device(mvm->trans); 1476 ret = iwl_trans_start_hw(mvm->trans); 1477 if (ret) 1478 return ret; 1479 1480 mvm->rfkill_safe_init_done = false; 1481 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); 1482 if (ret) 1483 return ret; 1484 1485 mvm->rfkill_safe_init_done = true; 1486 1487 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, 1488 NULL); 1489 1490 return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); 1491 } 1492 1493 int iwl_mvm_up(struct iwl_mvm *mvm) 1494 { 1495 int ret, i; 1496 struct ieee80211_supported_band *sband = NULL; 1497 1498 lockdep_assert_held(&mvm->mutex); 1499 1500 ret = iwl_trans_start_hw(mvm->trans); 1501 if (ret) 1502 return ret; 1503 1504 ret = iwl_mvm_load_rt_fw(mvm); 1505 if (ret) { 1506 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); 1507 if (ret != -ERFKILL && !mvm->pldr_sync) 1508 iwl_fw_dbg_error_collect(&mvm->fwrt, 1509 FW_DBG_TRIGGER_DRIVER); 1510 goto error; 1511 } 1512 1513 /* FW loaded successfully */ 1514 mvm->pldr_sync = false; 1515 1516 iwl_fw_disable_dbg_asserts(&mvm->fwrt); 1517 iwl_get_shared_mem_conf(&mvm->fwrt); 1518 1519 ret = iwl_mvm_sf_update(mvm, NULL, false); 1520 if (ret) 1521 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); 1522 1523 if (!iwl_trans_dbg_ini_valid(mvm->trans)) { 1524 mvm->fwrt.dump.conf = FW_DBG_INVALID; 1525 /* if we have a destination, assume EARLY START */ 1526 if (mvm->fw->dbg.dest_tlv) 1527 mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; 1528 iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); 1529 } 1530 1531 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); 1532 if (ret) 1533 goto error; 1534 1535 if (!iwl_mvm_has_unified_ucode(mvm)) { 1536 /* Send phy db control command and then phy db calibration */ 1537 ret = iwl_send_phy_db_data(mvm->phy_db); 1538 if (ret) 1539 goto error; 1540 ret = iwl_send_phy_cfg_cmd(mvm); 1541 if (ret) 1542 goto error; 1543 } 1544 1545 ret = iwl_mvm_send_bt_init_conf(mvm); 1546 if (ret) 1547 goto error; 1548 1549 if (fw_has_capa(&mvm->fw->ucode_capa, 1550 IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) { 1551 ret = iwl_set_soc_latency(&mvm->fwrt); 1552 if (ret) 1553 goto error; 1554 } 1555 1556 iwl_mvm_lari_cfg(mvm); 1557 1558 /* Init RSS configuration */ 1559 ret = iwl_configure_rxq(&mvm->fwrt); 1560 if (ret) 1561 goto error; 1562 1563 if (iwl_mvm_has_new_rx_api(mvm)) { 1564 ret = iwl_send_rss_cfg_cmd(mvm); 1565 if (ret) { 1566 IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", 1567 ret); 1568 goto error; 1569 } 1570 } 1571 1572 /* init the fw <-> mac80211 STA mapping */ 1573 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 1574 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); 1575 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); 1576 } 1577 1578 for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++) 1579 RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); 1580 1581 memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map)); 1582 1583 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; 1584 1585 /* reset quota debouncing buffer - 0xff will yield invalid data */ 1586 memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); 1587 1588 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) { 1589 ret = iwl_mvm_send_dqa_cmd(mvm); 1590 if (ret) 1591 goto error; 1592 } 1593 1594 /* 1595 * Add auxiliary station for scanning. 1596 * Newer versions of this command implies that the fw uses 1597 * internal aux station for all aux activities that don't 1598 * requires a dedicated data queue. 1599 */ 1600 if (!iwl_mvm_has_new_station_api(mvm->fw)) { 1601 /* 1602 * In old version the aux station uses mac id like other 1603 * station and not lmac id 1604 */ 1605 ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX); 1606 if (ret) 1607 goto error; 1608 } 1609 1610 /* Add all the PHY contexts */ 1611 i = 0; 1612 while (!sband && i < NUM_NL80211_BANDS) 1613 sband = mvm->hw->wiphy->bands[i++]; 1614 1615 if (WARN_ON_ONCE(!sband)) { 1616 ret = -ENODEV; 1617 goto error; 1618 } 1619 1620 if (iwl_mvm_is_tt_in_fw(mvm)) { 1621 /* in order to give the responsibility of ct-kill and 1622 * TX backoff to FW we need to send empty temperature reporting 1623 * cmd during init time 1624 */ 1625 iwl_mvm_send_temp_report_ths_cmd(mvm); 1626 } else { 1627 /* Initialize tx backoffs to the minimal possible */ 1628 iwl_mvm_tt_tx_backoff(mvm, 0); 1629 } 1630 1631 #ifdef CONFIG_THERMAL 1632 /* TODO: read the budget from BIOS / Platform NVM */ 1633 1634 /* 1635 * In case there is no budget from BIOS / Platform NVM the default 1636 * budget should be 2000mW (cooling state 0). 1637 */ 1638 if (iwl_mvm_is_ctdp_supported(mvm)) { 1639 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 1640 mvm->cooling_dev.cur_state); 1641 if (ret) 1642 goto error; 1643 } 1644 #endif 1645 1646 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) 1647 WARN_ON(iwl_mvm_config_ltr(mvm)); 1648 1649 ret = iwl_mvm_power_update_device(mvm); 1650 if (ret) 1651 goto error; 1652 1653 /* 1654 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx 1655 * anyway, so don't init MCC. 1656 */ 1657 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { 1658 ret = iwl_mvm_init_mcc(mvm); 1659 if (ret) 1660 goto error; 1661 } 1662 1663 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1664 mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; 1665 mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET; 1666 ret = iwl_mvm_config_scan(mvm); 1667 if (ret) 1668 goto error; 1669 } 1670 1671 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1672 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); 1673 1674 if (mvm->time_sync.active) 1675 iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr, 1676 IWL_TIME_SYNC_PROTOCOL_TM | 1677 IWL_TIME_SYNC_PROTOCOL_FTM); 1678 } 1679 1680 if (!mvm->ptp_data.ptp_clock) 1681 iwl_mvm_ptp_init(mvm); 1682 1683 ret = iwl_mvm_ppag_init(mvm); 1684 if (ret) 1685 goto error; 1686 1687 ret = iwl_mvm_sar_init(mvm); 1688 if (ret == 0) 1689 ret = iwl_mvm_sar_geo_init(mvm); 1690 if (ret < 0) 1691 goto error; 1692 1693 ret = iwl_mvm_sgom_init(mvm); 1694 if (ret) 1695 goto error; 1696 1697 iwl_mvm_tas_init(mvm); 1698 iwl_mvm_leds_sync(mvm); 1699 iwl_mvm_uats_init(mvm); 1700 1701 if (iwl_rfi_supported(mvm)) { 1702 if (iwl_mvm_eval_dsm_rfi(mvm)) 1703 iwl_rfi_send_config_cmd(mvm, NULL); 1704 } 1705 1706 iwl_mvm_mei_device_state(mvm, true); 1707 1708 IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); 1709 return 0; 1710 error: 1711 if (!iwlmvm_mod_params.init_dbg || !ret) 1712 iwl_mvm_stop_device(mvm); 1713 return ret; 1714 } 1715 1716 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) 1717 { 1718 int ret, i; 1719 1720 lockdep_assert_held(&mvm->mutex); 1721 1722 ret = iwl_trans_start_hw(mvm->trans); 1723 if (ret) 1724 return ret; 1725 1726 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); 1727 if (ret) { 1728 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); 1729 goto error; 1730 } 1731 1732 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); 1733 if (ret) 1734 goto error; 1735 1736 /* Send phy db control command and then phy db calibration*/ 1737 ret = iwl_send_phy_db_data(mvm->phy_db); 1738 if (ret) 1739 goto error; 1740 1741 ret = iwl_send_phy_cfg_cmd(mvm); 1742 if (ret) 1743 goto error; 1744 1745 /* init the fw <-> mac80211 STA mapping */ 1746 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 1747 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); 1748 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); 1749 } 1750 1751 if (!iwl_mvm_has_new_station_api(mvm->fw)) { 1752 /* 1753 * Add auxiliary station for scanning. 1754 * Newer versions of this command implies that the fw uses 1755 * internal aux station for all aux activities that don't 1756 * requires a dedicated data queue. 1757 * In old version the aux station uses mac id like other 1758 * station and not lmac id 1759 */ 1760 ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX); 1761 if (ret) 1762 goto error; 1763 } 1764 1765 return 0; 1766 error: 1767 iwl_mvm_stop_device(mvm); 1768 return ret; 1769 } 1770 1771 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, 1772 struct iwl_rx_cmd_buffer *rxb) 1773 { 1774 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1775 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; 1776 1777 IWL_DEBUG_INFO(mvm, 1778 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", 1779 le32_to_cpu(mfuart_notif->installed_ver), 1780 le32_to_cpu(mfuart_notif->external_ver), 1781 le32_to_cpu(mfuart_notif->status), 1782 le32_to_cpu(mfuart_notif->duration)); 1783 1784 if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) 1785 IWL_DEBUG_INFO(mvm, 1786 "MFUART: image size: 0x%08x\n", 1787 le32_to_cpu(mfuart_notif->image_size)); 1788 } 1789