/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B	(1024*1024*3)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
		psp_v13_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
			psp->autoload_supported = false;
		}
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		DRM_DEBUG("memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query a psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is invalid,
 * or true if the specific database entry is found and copied to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		DRM_ERROR("Failed to allocate memory to command buffer!\n");
		ret = -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}
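
	/*
	 * Memory training: set up the system-memory cache for the training
	 * data and run the cold-boot training pass before any firmware is
	 * loaded.
	 */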
	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			DRM_ERROR("Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			DRM_ERROR("Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(cmd);
	cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	default:
		return "UNKNOWN CMD";
	}
}

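/*
 * Submit a GFX command to the PSP ring and wait for it to retire: the command
 * is copied into the PSP command buffer, the PSP writes the fence index back
 * into the fence buffer on completion, and the response is copied back into
 * @cmd.  A non-zero response status is only warned about, except for SRIOV
 * ucode loads and timeouts, which fail the call.
 */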
"INVOKE_CMD"; 593 case GFX_CMD_ID_LOAD_ASD: 594 return "LOAD_ASD"; 595 case GFX_CMD_ID_SETUP_TMR: 596 return "SETUP_TMR"; 597 case GFX_CMD_ID_LOAD_IP_FW: 598 return "LOAD_IP_FW"; 599 case GFX_CMD_ID_DESTROY_TMR: 600 return "DESTROY_TMR"; 601 case GFX_CMD_ID_SAVE_RESTORE: 602 return "SAVE_RESTORE_IP_FW"; 603 case GFX_CMD_ID_SETUP_VMR: 604 return "SETUP_VMR"; 605 case GFX_CMD_ID_DESTROY_VMR: 606 return "DESTROY_VMR"; 607 case GFX_CMD_ID_PROG_REG: 608 return "PROG_REG"; 609 case GFX_CMD_ID_GET_FW_ATTESTATION: 610 return "GET_FW_ATTESTATION"; 611 case GFX_CMD_ID_LOAD_TOC: 612 return "ID_LOAD_TOC"; 613 case GFX_CMD_ID_AUTOLOAD_RLC: 614 return "AUTOLOAD_RLC"; 615 case GFX_CMD_ID_BOOT_CFG: 616 return "BOOT_CFG"; 617 default: 618 return "UNKNOWN CMD"; 619 } 620 } 621 622 static int 623 psp_cmd_submit_buf(struct psp_context *psp, 624 struct amdgpu_firmware_info *ucode, 625 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 626 { 627 int ret; 628 int index; 629 int timeout = 20000; 630 bool ras_intr = false; 631 bool skip_unsupport = false; 632 633 if (psp->adev->no_hw_access) 634 return 0; 635 636 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 637 638 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 639 640 index = atomic_inc_return(&psp->fence_value); 641 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 642 if (ret) { 643 atomic_dec(&psp->fence_value); 644 goto exit; 645 } 646 647 amdgpu_device_invalidate_hdp(psp->adev, NULL); 648 while (*((unsigned int *)psp->fence_buf) != index) { 649 if (--timeout == 0) 650 break; 651 /* 652 * Shouldn't wait for timeout when err_event_athub occurs, 653 * because gpu reset thread triggered and lock resource should 654 * be released for psp resume sequence. 655 */ 656 ras_intr = amdgpu_ras_intr_triggered(); 657 if (ras_intr) 658 break; 659 usleep_range(10, 100); 660 amdgpu_device_invalidate_hdp(psp->adev, NULL); 661 } 662 663 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 664 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 665 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 666 667 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 668 669 /* In some cases, psp response status is not 0 even there is no 670 * problem while the command is submitted. Some version of PSP FW 671 * doesn't write 0 to that field. 672 * So here we would like to only print a warning instead of an error 673 * during psp initialization to avoid breaking hw_init and it doesn't 674 * return -EINVAL. 675 */ 676 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 677 if (ucode) 678 DRM_WARN("failed to load ucode %s(0x%X) ", 679 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 680 DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 681 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, 682 psp->cmd_buf_mem->resp.status); 683 /* If any firmware (including CAP) load fails under SRIOV, it should 684 * return failure to stop the VF from initializing. 
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue the LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

static bool psp_boottime_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 6):
		return true;
	default:
		return false;
	}
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, they prefer the TMR address to be
	 * "naturally aligned", i.e. the start address should be evenly
	 * divisible by the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, the psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			DRM_ERROR("Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

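/*
 * Boot config helpers: GFX_CMD_ID_BOOT_CFG queries or updates the PSP
 * boot-time configuration; only the GECC bit is consumed here.
 */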
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for sriov now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		DRM_ERROR("PSP failed to program reg id %d", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id				= context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
		IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
		amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
		IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

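/*
 * Query the XGMI TA for the topology (hops, links, sharing) of up to
 * @number_devices peer nodes.  When extended data is requested, link counts
 * are accumulated and, where required, the result is reflected back onto the
 * peer devices in the hive via psp_xgmi_reflect_topology_info().
 */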
int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}

	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 6);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		DRM_WARN("RAS: Unsupported Interface");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return 0;
}

int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);

	psp->ras_context.context.initialized = false;

	return ret;
}

int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
			dev_warn(adev->dev, "PSP get boot config failed\n");

		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
			if (!boot_cfg) {
				dev_info(adev->dev, "GECC is disabled\n");
			} else {
				/* disable GECC in next boot cycle if ras is
				 * disabled by module parameter amdgpu_ras_enable
				 * and/or amdgpu_ras_mask, or the boot_config_get
				 * call failed
				 */
				ret = psp_boot_config_set(adev, 0);
				if (ret)
					dev_warn(adev->dev, "PSP set boot config failed\n");
				else
					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
			}
		} else {
			if (boot_cfg == 1) {
				dev_info(adev->dev, "GECC is enabled\n");
			} else {
				/* enable GECC in next boot cycle if it is disabled
				 * in boot config, or force enable GECC if failed to
				 * get boot configuration
				 */
				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
				if (ret)
					dev_warn(adev->dev, "PSP set boot config failed\n");
				else
					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
			}
		}
	}

	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->ras_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
		if (ret)
			return ret;
	}

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (amdgpu_ras_is_poison_mode_supported(adev))
		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
	ras_cmd->ras_in_message.init_flags.xcc_mask =
		adev->gfx.xcc_mask;
	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;

	ret = psp_ta_load(psp, &psp->ras_context.context);

	if (!ret && !ras_cmd->ras_status)
		psp->ras_context.context.initialized = true;
	else {
		if (ras_cmd->ras_status)
			dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);

		/* fail to load RAS TA */
		psp->ras_context.context.initialized = false;
	}

	return ret;
}

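/*
 * Inject a RAS error through the RAS TA.  @instance_mask selects the logical
 * instances of the affected block; it is translated to a device mask and
 * packed into sub_block_index for backward compatibility with the TA
 * interface.
 */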
int psp_ras_trigger_error(struct psp_context *psp,
			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
	struct ta_ras_shared_memory *ras_cmd;
	struct amdgpu_device *adev = psp->adev;
	int ret;
	uint32_t dev_mask;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	switch (info->block_id) {
	case TA_RAS_BLOCK__GFX:
		dev_mask = GET_MASK(GC, instance_mask);
		break;
	case TA_RAS_BLOCK__SDMA:
		dev_mask = GET_MASK(SDMA0, instance_mask);
		break;
	case TA_RAS_BLOCK__VCN:
	case TA_RAS_BLOCK__JPEG:
		dev_mask = GET_MASK(VCN, instance_mask);
		break;
	default:
		dev_mask = instance_mask;
		break;
	}

	/* reuse sub_block_index for backward compatibility */
	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
	dev_mask &= AMDGPU_RAS_INST_MASK;
	info->sub_block_index |= dev_mask;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
	ras_cmd->ras_in_message.trigger_error = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs, the error injection was successful, but
	 * the return status from the TA is no longer reliable
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_cmd->ras_status)
		return -EINVAL;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
	    !psp->hdcp_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->hdcp_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->hdcp_context.context);
	if (!ret) {
		psp->hdcp_context.context.initialized = true;
		mutex_init(&psp->hdcp_context.mutex);
	}

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}

static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->hdcp_context.context);

	psp->hdcp_context.context.initialized = false;

	return ret;
}
// HDCP end

// DTM start
static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.context.bin_desc.size_bytes ||
	    !psp->dtm_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->dtm_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->dtm_context.context);
	if (!ret) {
		psp->dtm_context.context.initialized = true;
		mutex_init(&psp->dtm_context.mutex);
	}

	return ret;
}

int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}

psp_dtm_terminate(struct psp_context *psp) 1898 { 1899 int ret; 1900 1901 /* 1902 * TODO: bypass the terminate in sriov for now 1903 */ 1904 if (amdgpu_sriov_vf(psp->adev)) 1905 return 0; 1906 1907 if (!psp->dtm_context.context.initialized) 1908 return 0; 1909 1910 ret = psp_ta_unload(psp, &psp->dtm_context.context); 1911 1912 psp->dtm_context.context.initialized = false; 1913 1914 return ret; 1915 } 1916 // DTM end 1917 1918 // RAP start 1919 static int psp_rap_initialize(struct psp_context *psp) 1920 { 1921 int ret; 1922 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 1923 1924 /* 1925 * TODO: bypass the initialize in sriov for now 1926 */ 1927 if (amdgpu_sriov_vf(psp->adev)) 1928 return 0; 1929 1930 if (!psp->rap_context.context.bin_desc.size_bytes || 1931 !psp->rap_context.context.bin_desc.start_addr) { 1932 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1933 return 0; 1934 } 1935 1936 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 1937 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1938 1939 if (!psp->rap_context.context.mem_context.shared_buf) { 1940 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 1941 if (ret) 1942 return ret; 1943 } 1944 1945 ret = psp_ta_load(psp, &psp->rap_context.context); 1946 if (!ret) { 1947 psp->rap_context.context.initialized = true; 1948 mutex_init(&psp->rap_context.mutex); 1949 } else 1950 return ret; 1951 1952 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 1953 if (ret || status != TA_RAP_STATUS__SUCCESS) { 1954 psp_rap_terminate(psp); 1955 /* free rap shared memory */ 1956 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 1957 1958 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 1959 ret, status); 1960 1961 return ret; 1962 } 1963 1964 return 0; 1965 } 1966 1967 static int psp_rap_terminate(struct psp_context *psp) 1968 { 1969 int ret; 1970 1971 if (!psp->rap_context.context.initialized) 1972 return 0; 1973 1974 ret = psp_ta_unload(psp, &psp->rap_context.context); 1975 1976 psp->rap_context.context.initialized = false; 1977 1978 return ret; 1979 } 1980 1981 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 1982 { 1983 struct ta_rap_shared_memory *rap_cmd; 1984 int ret = 0; 1985 1986 if (!psp->rap_context.context.initialized) 1987 return 0; 1988 1989 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 1990 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 1991 return -EINVAL; 1992 1993 mutex_lock(&psp->rap_context.mutex); 1994 1995 rap_cmd = (struct ta_rap_shared_memory *) 1996 psp->rap_context.context.mem_context.shared_buf; 1997 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 1998 1999 rap_cmd->cmd_id = ta_cmd_id; 2000 rap_cmd->validation_method_id = METHOD_A; 2001 2002 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2003 if (ret) 2004 goto out_unlock; 2005 2006 if (status) 2007 *status = rap_cmd->rap_status; 2008 2009 out_unlock: 2010 mutex_unlock(&psp->rap_context.mutex); 2011 2012 return ret; 2013 } 2014 // RAP end 2015 2016 /* securedisplay start */ 2017 static int psp_securedisplay_initialize(struct psp_context *psp) 2018 { 2019 int ret; 2020 struct ta_securedisplay_cmd *securedisplay_cmd; 2021 2022 /* 2023 * TODO: bypass the initialize in sriov for now 2024 */ 2025 if (amdgpu_sriov_vf(psp->adev)) 2026 return 0; 2027 2028 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2029 !psp->securedisplay_context.context.bin_desc.start_addr) { 
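/* the securedisplay TA is optional; skip initialization quietly when it is not packaged with the PSP firmware */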
2030 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2031 return 0; 2032 } 2033 2034 psp->securedisplay_context.context.mem_context.shared_mem_size = 2035 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2036 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2037 2038 if (!psp->securedisplay_context.context.initialized) { 2039 ret = psp_ta_init_shared_buf(psp, 2040 &psp->securedisplay_context.context.mem_context); 2041 if (ret) 2042 return ret; 2043 } 2044 2045 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2046 if (!ret) { 2047 psp->securedisplay_context.context.initialized = true; 2048 mutex_init(&psp->securedisplay_context.mutex); 2049 } else 2050 return ret; 2051 2052 mutex_lock(&psp->securedisplay_context.mutex); 2053 2054 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2055 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2056 2057 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2058 2059 mutex_unlock(&psp->securedisplay_context.mutex); 2060 2061 if (ret) { 2062 psp_securedisplay_terminate(psp); 2063 /* free securedisplay shared memory */ 2064 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2065 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2066 return -EINVAL; 2067 } 2068 2069 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2070 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2071 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2072 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2073 /* don't try again */ 2074 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2075 } 2076 2077 return 0; 2078 } 2079 2080 static int psp_securedisplay_terminate(struct psp_context *psp) 2081 { 2082 int ret; 2083 2084 /* 2085 * TODO:bypass the terminate in sriov for now 2086 */ 2087 if (amdgpu_sriov_vf(psp->adev)) 2088 return 0; 2089 2090 if (!psp->securedisplay_context.context.initialized) 2091 return 0; 2092 2093 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2094 2095 psp->securedisplay_context.context.initialized = false; 2096 2097 return ret; 2098 } 2099 2100 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2101 { 2102 int ret; 2103 2104 if (!psp->securedisplay_context.context.initialized) 2105 return -EINVAL; 2106 2107 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2108 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) 2109 return -EINVAL; 2110 2111 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2112 2113 return ret; 2114 } 2115 /* SECUREDISPLAY end */ 2116 2117 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2118 { 2119 struct psp_context *psp = &adev->psp; 2120 int ret = 0; 2121 2122 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2123 ret = psp->funcs->wait_for_bootloader(psp); 2124 2125 return ret; 2126 } 2127 2128 int amdgpu_psp_query_boot_status(struct amdgpu_device *adev) 2129 { 2130 struct psp_context *psp = &adev->psp; 2131 int ret = 0; 2132 2133 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2134 return 0; 2135 2136 if (psp->funcs && 2137 psp->funcs->query_boot_status) 2138 ret = psp->funcs->query_boot_status(psp); 2139 2140 return ret; 2141 } 2142 2143 static int psp_hw_start(struct psp_context *psp) 2144 { 2145 struct amdgpu_device *adev = psp->adev; 2146 int ret; 2147 2148 if (!amdgpu_sriov_vf(adev)) { 2149 if 
((is_psp_fw_valid(psp->kdb)) && 2150 (psp->funcs->bootloader_load_kdb != NULL)) { 2151 ret = psp_bootloader_load_kdb(psp); 2152 if (ret) { 2153 DRM_ERROR("PSP load kdb failed!\n"); 2154 return ret; 2155 } 2156 } 2157 2158 if ((is_psp_fw_valid(psp->spl)) && 2159 (psp->funcs->bootloader_load_spl != NULL)) { 2160 ret = psp_bootloader_load_spl(psp); 2161 if (ret) { 2162 DRM_ERROR("PSP load spl failed!\n"); 2163 return ret; 2164 } 2165 } 2166 2167 if ((is_psp_fw_valid(psp->sys)) && 2168 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2169 ret = psp_bootloader_load_sysdrv(psp); 2170 if (ret) { 2171 DRM_ERROR("PSP load sys drv failed!\n"); 2172 return ret; 2173 } 2174 } 2175 2176 if ((is_psp_fw_valid(psp->soc_drv)) && 2177 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2178 ret = psp_bootloader_load_soc_drv(psp); 2179 if (ret) { 2180 DRM_ERROR("PSP load soc drv failed!\n"); 2181 return ret; 2182 } 2183 } 2184 2185 if ((is_psp_fw_valid(psp->intf_drv)) && 2186 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2187 ret = psp_bootloader_load_intf_drv(psp); 2188 if (ret) { 2189 DRM_ERROR("PSP load intf drv failed!\n"); 2190 return ret; 2191 } 2192 } 2193 2194 if ((is_psp_fw_valid(psp->dbg_drv)) && 2195 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2196 ret = psp_bootloader_load_dbg_drv(psp); 2197 if (ret) { 2198 DRM_ERROR("PSP load dbg drv failed!\n"); 2199 return ret; 2200 } 2201 } 2202 2203 if ((is_psp_fw_valid(psp->ras_drv)) && 2204 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2205 ret = psp_bootloader_load_ras_drv(psp); 2206 if (ret) { 2207 DRM_ERROR("PSP load ras_drv failed!\n"); 2208 return ret; 2209 } 2210 } 2211 2212 if ((is_psp_fw_valid(psp->sos)) && 2213 (psp->funcs->bootloader_load_sos != NULL)) { 2214 ret = psp_bootloader_load_sos(psp); 2215 if (ret) { 2216 DRM_ERROR("PSP load sos failed!\n"); 2217 return ret; 2218 } 2219 } 2220 } 2221 2222 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2223 if (ret) { 2224 DRM_ERROR("PSP create ring failed!\n"); 2225 return ret; 2226 } 2227 2228 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2229 goto skip_pin_bo; 2230 2231 if (!psp_boottime_tmr(psp)) { 2232 ret = psp_tmr_init(psp); 2233 if (ret) { 2234 DRM_ERROR("PSP tmr init failed!\n"); 2235 return ret; 2236 } 2237 } 2238 2239 skip_pin_bo: 2240 /* 2241 * For ASICs with DF Cstate management centralized 2242 * to PMFW, TMR setup should be performed after PMFW 2243 * loaded and before other non-psp firmware loaded. 
2244 */ 2245 if (psp->pmfw_centralized_cstate_management) { 2246 ret = psp_load_smu_fw(psp); 2247 if (ret) 2248 return ret; 2249 } 2250 2251 ret = psp_tmr_load(psp); 2252 if (ret) { 2253 DRM_ERROR("PSP load tmr failed!\n"); 2254 return ret; 2255 } 2256 2257 return 0; 2258 } 2259 2260 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2261 enum psp_gfx_fw_type *type) 2262 { 2263 switch (ucode->ucode_id) { 2264 case AMDGPU_UCODE_ID_CAP: 2265 *type = GFX_FW_TYPE_CAP; 2266 break; 2267 case AMDGPU_UCODE_ID_SDMA0: 2268 *type = GFX_FW_TYPE_SDMA0; 2269 break; 2270 case AMDGPU_UCODE_ID_SDMA1: 2271 *type = GFX_FW_TYPE_SDMA1; 2272 break; 2273 case AMDGPU_UCODE_ID_SDMA2: 2274 *type = GFX_FW_TYPE_SDMA2; 2275 break; 2276 case AMDGPU_UCODE_ID_SDMA3: 2277 *type = GFX_FW_TYPE_SDMA3; 2278 break; 2279 case AMDGPU_UCODE_ID_SDMA4: 2280 *type = GFX_FW_TYPE_SDMA4; 2281 break; 2282 case AMDGPU_UCODE_ID_SDMA5: 2283 *type = GFX_FW_TYPE_SDMA5; 2284 break; 2285 case AMDGPU_UCODE_ID_SDMA6: 2286 *type = GFX_FW_TYPE_SDMA6; 2287 break; 2288 case AMDGPU_UCODE_ID_SDMA7: 2289 *type = GFX_FW_TYPE_SDMA7; 2290 break; 2291 case AMDGPU_UCODE_ID_CP_MES: 2292 *type = GFX_FW_TYPE_CP_MES; 2293 break; 2294 case AMDGPU_UCODE_ID_CP_MES_DATA: 2295 *type = GFX_FW_TYPE_MES_STACK; 2296 break; 2297 case AMDGPU_UCODE_ID_CP_MES1: 2298 *type = GFX_FW_TYPE_CP_MES_KIQ; 2299 break; 2300 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2301 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2302 break; 2303 case AMDGPU_UCODE_ID_CP_CE: 2304 *type = GFX_FW_TYPE_CP_CE; 2305 break; 2306 case AMDGPU_UCODE_ID_CP_PFP: 2307 *type = GFX_FW_TYPE_CP_PFP; 2308 break; 2309 case AMDGPU_UCODE_ID_CP_ME: 2310 *type = GFX_FW_TYPE_CP_ME; 2311 break; 2312 case AMDGPU_UCODE_ID_CP_MEC1: 2313 *type = GFX_FW_TYPE_CP_MEC; 2314 break; 2315 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2316 *type = GFX_FW_TYPE_CP_MEC_ME1; 2317 break; 2318 case AMDGPU_UCODE_ID_CP_MEC2: 2319 *type = GFX_FW_TYPE_CP_MEC; 2320 break; 2321 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2322 *type = GFX_FW_TYPE_CP_MEC_ME2; 2323 break; 2324 case AMDGPU_UCODE_ID_RLC_P: 2325 *type = GFX_FW_TYPE_RLC_P; 2326 break; 2327 case AMDGPU_UCODE_ID_RLC_V: 2328 *type = GFX_FW_TYPE_RLC_V; 2329 break; 2330 case AMDGPU_UCODE_ID_RLC_G: 2331 *type = GFX_FW_TYPE_RLC_G; 2332 break; 2333 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2334 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2335 break; 2336 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2337 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2338 break; 2339 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2340 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2341 break; 2342 case AMDGPU_UCODE_ID_RLC_IRAM: 2343 *type = GFX_FW_TYPE_RLC_IRAM; 2344 break; 2345 case AMDGPU_UCODE_ID_RLC_DRAM: 2346 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2347 break; 2348 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2349 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2350 break; 2351 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2352 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2353 break; 2354 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2355 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2356 break; 2357 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2358 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2359 break; 2360 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2361 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2362 break; 2363 case AMDGPU_UCODE_ID_SMC: 2364 *type = GFX_FW_TYPE_SMU; 2365 break; 2366 case AMDGPU_UCODE_ID_PPTABLE: 2367 *type = GFX_FW_TYPE_PPTABLE; 2368 break; 2369 case AMDGPU_UCODE_ID_UVD: 2370 *type = GFX_FW_TYPE_UVD; 2371 break; 2372 case AMDGPU_UCODE_ID_UVD1: 2373 *type = GFX_FW_TYPE_UVD1; 2374 break; 2375 
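/* multimedia (VCE/VCN) and display (DMCU/DMCUB) firmware types */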
case AMDGPU_UCODE_ID_VCE: 2376 *type = GFX_FW_TYPE_VCE; 2377 break; 2378 case AMDGPU_UCODE_ID_VCN: 2379 *type = GFX_FW_TYPE_VCN; 2380 break; 2381 case AMDGPU_UCODE_ID_VCN1: 2382 *type = GFX_FW_TYPE_VCN1; 2383 break; 2384 case AMDGPU_UCODE_ID_DMCU_ERAM: 2385 *type = GFX_FW_TYPE_DMCU_ERAM; 2386 break; 2387 case AMDGPU_UCODE_ID_DMCU_INTV: 2388 *type = GFX_FW_TYPE_DMCU_ISR; 2389 break; 2390 case AMDGPU_UCODE_ID_VCN0_RAM: 2391 *type = GFX_FW_TYPE_VCN0_RAM; 2392 break; 2393 case AMDGPU_UCODE_ID_VCN1_RAM: 2394 *type = GFX_FW_TYPE_VCN1_RAM; 2395 break; 2396 case AMDGPU_UCODE_ID_DMCUB: 2397 *type = GFX_FW_TYPE_DMUB; 2398 break; 2399 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2400 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2401 break; 2402 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2403 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2404 break; 2405 case AMDGPU_UCODE_ID_IMU_I: 2406 *type = GFX_FW_TYPE_IMU_I; 2407 break; 2408 case AMDGPU_UCODE_ID_IMU_D: 2409 *type = GFX_FW_TYPE_IMU_D; 2410 break; 2411 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2412 *type = GFX_FW_TYPE_RS64_PFP; 2413 break; 2414 case AMDGPU_UCODE_ID_CP_RS64_ME: 2415 *type = GFX_FW_TYPE_RS64_ME; 2416 break; 2417 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2418 *type = GFX_FW_TYPE_RS64_MEC; 2419 break; 2420 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2421 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2422 break; 2423 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2424 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2425 break; 2426 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2427 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2428 break; 2429 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2430 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2431 break; 2432 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2433 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2434 break; 2435 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2436 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2437 break; 2438 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2439 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2440 break; 2441 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2442 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2443 break; 2444 case AMDGPU_UCODE_ID_VPE_CTX: 2445 *type = GFX_FW_TYPE_VPEC_FW1; 2446 break; 2447 case AMDGPU_UCODE_ID_VPE_CTL: 2448 *type = GFX_FW_TYPE_VPEC_FW2; 2449 break; 2450 case AMDGPU_UCODE_ID_VPE: 2451 *type = GFX_FW_TYPE_VPE; 2452 break; 2453 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2454 *type = GFX_FW_TYPE_UMSCH_UCODE; 2455 break; 2456 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2457 *type = GFX_FW_TYPE_UMSCH_DATA; 2458 break; 2459 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2460 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2461 break; 2462 case AMDGPU_UCODE_ID_P2S_TABLE: 2463 *type = GFX_FW_TYPE_P2S_TABLE; 2464 break; 2465 case AMDGPU_UCODE_ID_MAXIMUM: 2466 default: 2467 return -EINVAL; 2468 } 2469 2470 return 0; 2471 } 2472 2473 static void psp_print_fw_hdr(struct psp_context *psp, 2474 struct amdgpu_firmware_info *ucode) 2475 { 2476 struct amdgpu_device *adev = psp->adev; 2477 struct common_firmware_header *hdr; 2478 2479 switch (ucode->ucode_id) { 2480 case AMDGPU_UCODE_ID_SDMA0: 2481 case AMDGPU_UCODE_ID_SDMA1: 2482 case AMDGPU_UCODE_ID_SDMA2: 2483 case AMDGPU_UCODE_ID_SDMA3: 2484 case AMDGPU_UCODE_ID_SDMA4: 2485 case AMDGPU_UCODE_ID_SDMA5: 2486 case AMDGPU_UCODE_ID_SDMA6: 2487 case AMDGPU_UCODE_ID_SDMA7: 2488 hdr = (struct common_firmware_header *) 2489 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2490 amdgpu_ucode_print_sdma_hdr(hdr); 2491 break; 2492 case AMDGPU_UCODE_ID_CP_CE: 2493 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2494 
amdgpu_ucode_print_gfx_hdr(hdr); 2495 break; 2496 case AMDGPU_UCODE_ID_CP_PFP: 2497 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2498 amdgpu_ucode_print_gfx_hdr(hdr); 2499 break; 2500 case AMDGPU_UCODE_ID_CP_ME: 2501 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2502 amdgpu_ucode_print_gfx_hdr(hdr); 2503 break; 2504 case AMDGPU_UCODE_ID_CP_MEC1: 2505 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2506 amdgpu_ucode_print_gfx_hdr(hdr); 2507 break; 2508 case AMDGPU_UCODE_ID_RLC_G: 2509 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2510 amdgpu_ucode_print_rlc_hdr(hdr); 2511 break; 2512 case AMDGPU_UCODE_ID_SMC: 2513 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2514 amdgpu_ucode_print_smc_hdr(hdr); 2515 break; 2516 default: 2517 break; 2518 } 2519 } 2520 2521 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 2522 struct psp_gfx_cmd_resp *cmd) 2523 { 2524 int ret; 2525 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2526 2527 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2528 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2529 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2530 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2531 2532 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2533 if (ret) 2534 DRM_ERROR("Unknown firmware type\n"); 2535 2536 return ret; 2537 } 2538 2539 int psp_execute_ip_fw_load(struct psp_context *psp, 2540 struct amdgpu_firmware_info *ucode) 2541 { 2542 int ret = 0; 2543 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2544 2545 ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd); 2546 if (!ret) { 2547 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2548 psp->fence_buf_mc_addr); 2549 } 2550 2551 release_psp_cmd_buf(psp); 2552 2553 return ret; 2554 } 2555 2556 static int psp_load_p2s_table(struct psp_context *psp) 2557 { 2558 int ret; 2559 struct amdgpu_device *adev = psp->adev; 2560 struct amdgpu_firmware_info *ucode = 2561 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2562 2563 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2564 return 0; 2565 2566 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { 2567 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2568 0x0036003C; 2569 if (psp->sos.fw_version < supp_vers) 2570 return 0; 2571 } 2572 2573 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2574 return 0; 2575 2576 ret = psp_execute_ip_fw_load(psp, ucode); 2577 2578 return ret; 2579 } 2580 2581 static int psp_load_smu_fw(struct psp_context *psp) 2582 { 2583 int ret; 2584 struct amdgpu_device *adev = psp->adev; 2585 struct amdgpu_firmware_info *ucode = 2586 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2587 struct amdgpu_ras *ras = psp->ras_context.ras; 2588 2589 /* 2590 * Skip SMU FW reloading in case of using BACO for runpm only, 2591 * as SMU is always alive. 
2592 */ 2593 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2594 return 0; 2595 2596 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2597 return 0; 2598 2599 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2600 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2601 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2602 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2603 if (ret) 2604 DRM_WARN("Failed to set MP1 state prepare for reload\n"); 2605 } 2606 2607 ret = psp_execute_ip_fw_load(psp, ucode); 2608 2609 if (ret) 2610 DRM_ERROR("PSP load smu failed!\n"); 2611 2612 return ret; 2613 } 2614 2615 static bool fw_load_skip_check(struct psp_context *psp, 2616 struct amdgpu_firmware_info *ucode) 2617 { 2618 if (!ucode->fw || !ucode->ucode_size) 2619 return true; 2620 2621 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2622 return true; 2623 2624 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2625 (psp_smu_reload_quirk(psp) || 2626 psp->autoload_supported || 2627 psp->pmfw_centralized_cstate_management)) 2628 return true; 2629 2630 if (amdgpu_sriov_vf(psp->adev) && 2631 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2632 return true; 2633 2634 if (psp->autoload_supported && 2635 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2636 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2637 /* skip mec JT when autoload is enabled */ 2638 return true; 2639 2640 return false; 2641 } 2642 2643 int psp_load_fw_list(struct psp_context *psp, 2644 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2645 { 2646 int ret = 0, i; 2647 struct amdgpu_firmware_info *ucode; 2648 2649 for (i = 0; i < ucode_count; ++i) { 2650 ucode = ucode_list[i]; 2651 psp_print_fw_hdr(psp, ucode); 2652 ret = psp_execute_ip_fw_load(psp, ucode); 2653 if (ret) 2654 return ret; 2655 } 2656 return ret; 2657 } 2658 2659 static int psp_load_non_psp_fw(struct psp_context *psp) 2660 { 2661 int i, ret; 2662 struct amdgpu_firmware_info *ucode; 2663 struct amdgpu_device *adev = psp->adev; 2664 2665 if (psp->autoload_supported && 2666 !psp->pmfw_centralized_cstate_management) { 2667 ret = psp_load_smu_fw(psp); 2668 if (ret) 2669 return ret; 2670 } 2671 2672 /* Load P2S table first if it's available */ 2673 psp_load_p2s_table(psp); 2674 2675 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2676 ucode = &adev->firmware.ucode[i]; 2677 2678 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2679 !fw_load_skip_check(psp, ucode)) { 2680 ret = psp_load_smu_fw(psp); 2681 if (ret) 2682 return ret; 2683 continue; 2684 } 2685 2686 if (fw_load_skip_check(psp, ucode)) 2687 continue; 2688 2689 if (psp->autoload_supported && 2690 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2691 IP_VERSION(11, 0, 7) || 2692 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2693 IP_VERSION(11, 0, 11) || 2694 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2695 IP_VERSION(11, 0, 12)) && 2696 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2697 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2698 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2699 /* PSP only receives one SDMA fw for sienna_cichlid, 2700 * as all four sdma fw are the same 2701 */ 2702 continue; 2703 2704 psp_print_fw_hdr(psp, ucode); 2705 2706 ret = psp_execute_ip_fw_load(psp, ucode); 2707 if (ret) 2708 return ret; 2709 2710 /* Start rlc autoload after psp received all the gfx firmware */ 2711 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2712 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 2713 ret = psp_rlc_autoload_start(psp); 2714 if (ret) { 2715 DRM_ERROR("Failed to start rlc autoload\n"); 2716 return ret; 2717 } 2718 } 2719 } 2720 2721 return 0; 2722 } 2723 2724 static int psp_load_fw(struct amdgpu_device *adev) 2725 { 2726 int ret; 2727 struct psp_context *psp = &adev->psp; 2728 2729 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2730 /* should not destroy ring, only stop */ 2731 psp_ring_stop(psp, PSP_RING_TYPE__KM); 2732 } else { 2733 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2734 2735 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2736 if (ret) { 2737 DRM_ERROR("PSP ring init failed!\n"); 2738 goto failed; 2739 } 2740 } 2741 2742 ret = psp_hw_start(psp); 2743 if (ret) 2744 goto failed; 2745 2746 ret = psp_load_non_psp_fw(psp); 2747 if (ret) 2748 goto failed1; 2749 2750 ret = psp_asd_initialize(psp); 2751 if (ret) { 2752 DRM_ERROR("PSP load asd failed!\n"); 2753 goto failed1; 2754 } 2755 2756 ret = psp_rl_load(adev); 2757 if (ret) { 2758 DRM_ERROR("PSP load RL failed!\n"); 2759 goto failed1; 2760 } 2761 2762 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2763 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2764 ret = psp_xgmi_initialize(psp, false, true); 2765 /* Warn on XGMI session initialization failure 2766 * instead of stopping driver initialization 2767 */ 2768 if (ret) 2769 dev_err(psp->adev->dev, 2770 "XGMI: Failed to initialize XGMI session\n"); 2771 } 2772 } 2773 2774 if (psp->ta_fw) { 2775 ret = psp_ras_initialize(psp); 2776 if (ret) 2777 dev_err(psp->adev->dev, 2778 "RAS: Failed to initialize RAS\n"); 2779 2780 ret = psp_hdcp_initialize(psp); 2781 if (ret) 2782 dev_err(psp->adev->dev, 2783 "HDCP: Failed to initialize HDCP\n"); 2784 2785 ret = psp_dtm_initialize(psp); 2786 if (ret) 2787 dev_err(psp->adev->dev, 2788 "DTM: Failed to initialize DTM\n"); 2789 2790 ret = psp_rap_initialize(psp); 2791 if (ret) 2792 dev_err(psp->adev->dev, 2793 "RAP: Failed to initialize RAP\n"); 2794 2795 ret = psp_securedisplay_initialize(psp); 2796 if (ret) 2797 dev_err(psp->adev->dev, 2798 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 2799 } 2800 2801 return 0; 2802 2803 failed1: 2804 psp_free_shared_bufs(psp); 2805 failed: 2806 /* 2807 * all cleanup jobs (xgmi terminate, ras terminate, 2808 * ring destroy, cmd/fence/fw buffers destroy, 2809 * psp->cmd destroy) are delayed to psp_hw_fini 2810 */ 2811 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2812 return ret; 2813 } 2814 2815 static int psp_hw_init(void *handle) 2816 { 2817 int ret; 2818 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2819 2820 mutex_lock(&adev->firmware.mutex); 2821 /* 2822 * This sequence is only used once, on hw_init; there is no need for it on 2823 * resume.
2824 */ 2825 ret = amdgpu_ucode_init_bo(adev); 2826 if (ret) 2827 goto failed; 2828 2829 ret = psp_load_fw(adev); 2830 if (ret) { 2831 DRM_ERROR("PSP firmware loading failed\n"); 2832 goto failed; 2833 } 2834 2835 mutex_unlock(&adev->firmware.mutex); 2836 return 0; 2837 2838 failed: 2839 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2840 mutex_unlock(&adev->firmware.mutex); 2841 return -EINVAL; 2842 } 2843 2844 static int psp_hw_fini(void *handle) 2845 { 2846 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2847 struct psp_context *psp = &adev->psp; 2848 2849 if (psp->ta_fw) { 2850 psp_ras_terminate(psp); 2851 psp_securedisplay_terminate(psp); 2852 psp_rap_terminate(psp); 2853 psp_dtm_terminate(psp); 2854 psp_hdcp_terminate(psp); 2855 2856 if (adev->gmc.xgmi.num_physical_nodes > 1) 2857 psp_xgmi_terminate(psp); 2858 } 2859 2860 psp_asd_terminate(psp); 2861 psp_tmr_terminate(psp); 2862 2863 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2864 2865 return 0; 2866 } 2867 2868 static int psp_suspend(void *handle) 2869 { 2870 int ret = 0; 2871 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2872 struct psp_context *psp = &adev->psp; 2873 2874 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2875 psp->xgmi_context.context.initialized) { 2876 ret = psp_xgmi_terminate(psp); 2877 if (ret) { 2878 DRM_ERROR("Failed to terminate xgmi ta\n"); 2879 goto out; 2880 } 2881 } 2882 2883 if (psp->ta_fw) { 2884 ret = psp_ras_terminate(psp); 2885 if (ret) { 2886 DRM_ERROR("Failed to terminate ras ta\n"); 2887 goto out; 2888 } 2889 ret = psp_hdcp_terminate(psp); 2890 if (ret) { 2891 DRM_ERROR("Failed to terminate hdcp ta\n"); 2892 goto out; 2893 } 2894 ret = psp_dtm_terminate(psp); 2895 if (ret) { 2896 DRM_ERROR("Failed to terminate dtm ta\n"); 2897 goto out; 2898 } 2899 ret = psp_rap_terminate(psp); 2900 if (ret) { 2901 DRM_ERROR("Failed to terminate rap ta\n"); 2902 goto out; 2903 } 2904 ret = psp_securedisplay_terminate(psp); 2905 if (ret) { 2906 DRM_ERROR("Failed to terminate securedisplay ta\n"); 2907 goto out; 2908 } 2909 } 2910 2911 ret = psp_asd_terminate(psp); 2912 if (ret) { 2913 DRM_ERROR("Failed to terminate asd\n"); 2914 goto out; 2915 } 2916 2917 ret = psp_tmr_terminate(psp); 2918 if (ret) { 2919 DRM_ERROR("Failed to terminate tmr\n"); 2920 goto out; 2921 } 2922 2923 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2924 if (ret) 2925 DRM_ERROR("PSP ring stop failed\n"); 2926 2927 out: 2928 return ret; 2929 } 2930 2931 static int psp_resume(void *handle) 2932 { 2933 int ret; 2934 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2935 struct psp_context *psp = &adev->psp; 2936 2937 DRM_INFO("PSP is resuming...\n"); 2938 2939 if (psp->mem_train_ctx.enable_mem_training) { 2940 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2941 if (ret) { 2942 DRM_ERROR("Failed to process memory training!\n"); 2943 return ret; 2944 } 2945 } 2946 2947 mutex_lock(&adev->firmware.mutex); 2948 2949 ret = psp_hw_start(psp); 2950 if (ret) 2951 goto failed; 2952 2953 ret = psp_load_non_psp_fw(psp); 2954 if (ret) 2955 goto failed; 2956 2957 ret = psp_asd_initialize(psp); 2958 if (ret) { 2959 DRM_ERROR("PSP load asd failed!\n"); 2960 goto failed; 2961 } 2962 2963 ret = psp_rl_load(adev); 2964 if (ret) { 2965 dev_err(adev->dev, "PSP load RL failed!\n"); 2966 goto failed; 2967 } 2968 2969 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2970 ret = psp_xgmi_initialize(psp, false, true); 2971 /* Warn on XGMI session initialization failure 2972 * instead of stopping driver initialization 2973 */ 2974 if (ret)
2975 dev_err(psp->adev->dev, 2976 "XGMI: Failed to initialize XGMI session\n"); 2977 } 2978 2979 if (psp->ta_fw) { 2980 ret = psp_ras_initialize(psp); 2981 if (ret) 2982 dev_err(psp->adev->dev, 2983 "RAS: Failed to initialize RAS\n"); 2984 2985 ret = psp_hdcp_initialize(psp); 2986 if (ret) 2987 dev_err(psp->adev->dev, 2988 "HDCP: Failed to initialize HDCP\n"); 2989 2990 ret = psp_dtm_initialize(psp); 2991 if (ret) 2992 dev_err(psp->adev->dev, 2993 "DTM: Failed to initialize DTM\n"); 2994 2995 ret = psp_rap_initialize(psp); 2996 if (ret) 2997 dev_err(psp->adev->dev, 2998 "RAP: Failed to initialize RAP\n"); 2999 3000 ret = psp_securedisplay_initialize(psp); 3001 if (ret) 3002 dev_err(psp->adev->dev, 3003 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3004 } 3005 3006 mutex_unlock(&adev->firmware.mutex); 3007 3008 return 0; 3009 3010 failed: 3011 DRM_ERROR("PSP resume failed\n"); 3012 mutex_unlock(&adev->firmware.mutex); 3013 return ret; 3014 } 3015 3016 int psp_gpu_reset(struct amdgpu_device *adev) 3017 { 3018 int ret; 3019 3020 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3021 return 0; 3022 3023 mutex_lock(&adev->psp.mutex); 3024 ret = psp_mode1_reset(&adev->psp); 3025 mutex_unlock(&adev->psp.mutex); 3026 3027 return ret; 3028 } 3029 3030 int psp_rlc_autoload_start(struct psp_context *psp) 3031 { 3032 int ret; 3033 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3034 3035 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3036 3037 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3038 psp->fence_buf_mc_addr); 3039 3040 release_psp_cmd_buf(psp); 3041 3042 return ret; 3043 } 3044 3045 int psp_ring_cmd_submit(struct psp_context *psp, 3046 uint64_t cmd_buf_mc_addr, 3047 uint64_t fence_mc_addr, 3048 int index) 3049 { 3050 unsigned int psp_write_ptr_reg = 0; 3051 struct psp_gfx_rb_frame *write_frame; 3052 struct psp_ring *ring = &psp->km_ring; 3053 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3054 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3055 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3056 struct amdgpu_device *adev = psp->adev; 3057 uint32_t ring_size_dw = ring->ring_size / 4; 3058 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3059 3060 /* KM (GPCOM) prepare write pointer */ 3061 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3062 3063 /* Update KM RB frame pointer to new frame */ 3064 /* write_frame ptr increments by size of rb_frame in bytes */ 3065 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3066 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3067 write_frame = ring_buffer_start; 3068 else 3069 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3070 /* Check invalid write_frame ptr address */ 3071 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3072 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3073 ring_buffer_start, ring_buffer_end, write_frame); 3074 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 3075 return -EINVAL; 3076 } 3077 3078 /* Initialize KM RB frame */ 3079 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3080 3081 /* Update KM RB frame */ 3082 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3083 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3084 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3085 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3086 write_frame->fence_value = index; 3087 amdgpu_device_flush_hdp(adev, NULL); 3088 
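/* flush HDP so the RB frame written above is visible to the PSP before the write pointer is advanced */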
3089 /* Update the write Pointer in DWORDs */ 3090 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3091 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3092 return 0; 3093 } 3094 3095 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3096 { 3097 struct amdgpu_device *adev = psp->adev; 3098 char fw_name[PSP_FW_NAME_LEN]; 3099 const struct psp_firmware_header_v1_0 *asd_hdr; 3100 int err = 0; 3101 3102 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 3103 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); 3104 if (err) 3105 goto out; 3106 3107 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3108 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3109 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3110 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3111 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3112 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3113 return 0; 3114 out: 3115 amdgpu_ucode_release(&adev->psp.asd_fw); 3116 return err; 3117 } 3118 3119 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3120 { 3121 struct amdgpu_device *adev = psp->adev; 3122 char fw_name[PSP_FW_NAME_LEN]; 3123 const struct psp_firmware_header_v1_0 *toc_hdr; 3124 int err = 0; 3125 3126 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); 3127 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); 3128 if (err) 3129 goto out; 3130 3131 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3132 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3133 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3134 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3135 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3136 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3137 return 0; 3138 out: 3139 amdgpu_ucode_release(&adev->psp.toc_fw); 3140 return err; 3141 } 3142 3143 static int parse_sos_bin_descriptor(struct psp_context *psp, 3144 const struct psp_fw_bin_desc *desc, 3145 const struct psp_firmware_header_v2_0 *sos_hdr) 3146 { 3147 uint8_t *ucode_start_addr = NULL; 3148 3149 if (!psp || !desc || !sos_hdr) 3150 return -EINVAL; 3151 3152 ucode_start_addr = (uint8_t *)sos_hdr + 3153 le32_to_cpu(desc->offset_bytes) + 3154 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3155 3156 switch (desc->fw_type) { 3157 case PSP_FW_TYPE_PSP_SOS: 3158 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3159 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3160 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3161 psp->sos.start_addr = ucode_start_addr; 3162 break; 3163 case PSP_FW_TYPE_PSP_SYS_DRV: 3164 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3165 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3166 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3167 psp->sys.start_addr = ucode_start_addr; 3168 break; 3169 case PSP_FW_TYPE_PSP_KDB: 3170 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3171 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3172 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3173 psp->kdb.start_addr = ucode_start_addr; 3174 break; 3175 case PSP_FW_TYPE_PSP_TOC: 3176 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3177 psp->toc.feature_version = 
le32_to_cpu(desc->fw_version); 3178 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3179 psp->toc.start_addr = ucode_start_addr; 3180 break; 3181 case PSP_FW_TYPE_PSP_SPL: 3182 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3183 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3184 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3185 psp->spl.start_addr = ucode_start_addr; 3186 break; 3187 case PSP_FW_TYPE_PSP_RL: 3188 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3189 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3190 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3191 psp->rl.start_addr = ucode_start_addr; 3192 break; 3193 case PSP_FW_TYPE_PSP_SOC_DRV: 3194 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3195 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3196 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3197 psp->soc_drv.start_addr = ucode_start_addr; 3198 break; 3199 case PSP_FW_TYPE_PSP_INTF_DRV: 3200 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3201 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3202 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3203 psp->intf_drv.start_addr = ucode_start_addr; 3204 break; 3205 case PSP_FW_TYPE_PSP_DBG_DRV: 3206 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3207 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3208 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3209 psp->dbg_drv.start_addr = ucode_start_addr; 3210 break; 3211 case PSP_FW_TYPE_PSP_RAS_DRV: 3212 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3213 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3214 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3215 psp->ras_drv.start_addr = ucode_start_addr; 3216 break; 3217 default: 3218 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3219 break; 3220 } 3221 3222 return 0; 3223 } 3224 3225 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3226 { 3227 const struct psp_firmware_header_v1_0 *sos_hdr; 3228 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3229 uint8_t *ucode_array_start_addr; 3230 3231 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3232 ucode_array_start_addr = (uint8_t *)sos_hdr + 3233 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3234 3235 if (adev->gmc.xgmi.connected_to_cpu || 3236 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3237 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3238 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3239 3240 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3241 adev->psp.sys.start_addr = ucode_array_start_addr; 3242 3243 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3244 adev->psp.sos.start_addr = ucode_array_start_addr + 3245 le32_to_cpu(sos_hdr->sos.offset_bytes); 3246 } else { 3247 /* Load alternate PSP SOS FW */ 3248 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3249 3250 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3251 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3252 3253 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3254 adev->psp.sys.start_addr = ucode_array_start_addr + 3255 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3256 3257 adev->psp.sos.size_bytes = 
le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3258 adev->psp.sos.start_addr = ucode_array_start_addr + 3259 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3260 } 3261 3262 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3263 dev_warn(adev->dev, "PSP SOS FW not available"); 3264 return -EINVAL; 3265 } 3266 3267 return 0; 3268 } 3269 3270 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3271 { 3272 struct amdgpu_device *adev = psp->adev; 3273 char fw_name[PSP_FW_NAME_LEN]; 3274 const struct psp_firmware_header_v1_0 *sos_hdr; 3275 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3276 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3277 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3278 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3279 int err = 0; 3280 uint8_t *ucode_array_start_addr; 3281 int fw_index = 0; 3282 3283 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 3284 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); 3285 if (err) 3286 goto out; 3287 3288 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3289 ucode_array_start_addr = (uint8_t *)sos_hdr + 3290 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3291 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3292 3293 switch (sos_hdr->header.header_version_major) { 3294 case 1: 3295 err = psp_init_sos_base_fw(adev); 3296 if (err) 3297 goto out; 3298 3299 if (sos_hdr->header.header_version_minor == 1) { 3300 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3301 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3302 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3303 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3304 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3305 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3306 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3307 } 3308 if (sos_hdr->header.header_version_minor == 2) { 3309 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3310 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3311 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3312 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3313 } 3314 if (sos_hdr->header.header_version_minor == 3) { 3315 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3316 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3317 adev->psp.toc.start_addr = ucode_array_start_addr + 3318 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3319 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3320 adev->psp.kdb.start_addr = ucode_array_start_addr + 3321 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3322 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3323 adev->psp.spl.start_addr = ucode_array_start_addr + 3324 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3325 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3326 adev->psp.rl.start_addr = ucode_array_start_addr + 3327 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3328 } 3329 break; 3330 case 2: 3331 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3332 3333 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3334 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3335 err = -EINVAL; 3336 goto out; 3337 } 3338 3339 for 
(fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { 3340 err = parse_sos_bin_descriptor(psp, 3341 &sos_hdr_v2_0->psp_fw_bin[fw_index], 3342 sos_hdr_v2_0); 3343 if (err) 3344 goto out; 3345 } 3346 break; 3347 default: 3348 dev_err(adev->dev, 3349 "unsupported psp sos firmware\n"); 3350 err = -EINVAL; 3351 goto out; 3352 } 3353 3354 return 0; 3355 out: 3356 amdgpu_ucode_release(&adev->psp.sos_fw); 3357 3358 return err; 3359 } 3360 3361 static int parse_ta_bin_descriptor(struct psp_context *psp, 3362 const struct psp_fw_bin_desc *desc, 3363 const struct ta_firmware_header_v2_0 *ta_hdr) 3364 { 3365 uint8_t *ucode_start_addr = NULL; 3366 3367 if (!psp || !desc || !ta_hdr) 3368 return -EINVAL; 3369 3370 ucode_start_addr = (uint8_t *)ta_hdr + 3371 le32_to_cpu(desc->offset_bytes) + 3372 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3373 3374 switch (desc->fw_type) { 3375 case TA_FW_TYPE_PSP_ASD: 3376 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3377 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3378 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3379 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3380 break; 3381 case TA_FW_TYPE_PSP_XGMI: 3382 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3383 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3384 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3385 break; 3386 case TA_FW_TYPE_PSP_RAS: 3387 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3388 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3389 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3390 break; 3391 case TA_FW_TYPE_PSP_HDCP: 3392 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3393 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3394 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3395 break; 3396 case TA_FW_TYPE_PSP_DTM: 3397 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3398 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3399 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3400 break; 3401 case TA_FW_TYPE_PSP_RAP: 3402 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3403 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3404 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3405 break; 3406 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3407 psp->securedisplay_context.context.bin_desc.fw_version = 3408 le32_to_cpu(desc->fw_version); 3409 psp->securedisplay_context.context.bin_desc.size_bytes = 3410 le32_to_cpu(desc->size_bytes); 3411 psp->securedisplay_context.context.bin_desc.start_addr = 3412 ucode_start_addr; 3413 break; 3414 default: 3415 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3416 break; 3417 } 3418 3419 return 0; 3420 } 3421 3422 static int parse_ta_v1_microcode(struct psp_context *psp) 3423 { 3424 const struct ta_firmware_header_v1_0 *ta_hdr; 3425 struct amdgpu_device *adev = psp->adev; 3426 3427 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3428 3429 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3430 return -EINVAL; 3431 3432 adev->psp.xgmi_context.context.bin_desc.fw_version = 3433 
le32_to_cpu(ta_hdr->xgmi.fw_version); 3434 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3435 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3436 adev->psp.xgmi_context.context.bin_desc.start_addr = 3437 (uint8_t *)ta_hdr + 3438 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3439 3440 adev->psp.ras_context.context.bin_desc.fw_version = 3441 le32_to_cpu(ta_hdr->ras.fw_version); 3442 adev->psp.ras_context.context.bin_desc.size_bytes = 3443 le32_to_cpu(ta_hdr->ras.size_bytes); 3444 adev->psp.ras_context.context.bin_desc.start_addr = 3445 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3446 le32_to_cpu(ta_hdr->ras.offset_bytes); 3447 3448 adev->psp.hdcp_context.context.bin_desc.fw_version = 3449 le32_to_cpu(ta_hdr->hdcp.fw_version); 3450 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3451 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3452 adev->psp.hdcp_context.context.bin_desc.start_addr = 3453 (uint8_t *)ta_hdr + 3454 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3455 3456 adev->psp.dtm_context.context.bin_desc.fw_version = 3457 le32_to_cpu(ta_hdr->dtm.fw_version); 3458 adev->psp.dtm_context.context.bin_desc.size_bytes = 3459 le32_to_cpu(ta_hdr->dtm.size_bytes); 3460 adev->psp.dtm_context.context.bin_desc.start_addr = 3461 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3462 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3463 3464 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3465 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3466 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3467 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3468 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3469 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3470 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3471 3472 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3473 3474 return 0; 3475 } 3476 3477 static int parse_ta_v2_microcode(struct psp_context *psp) 3478 { 3479 const struct ta_firmware_header_v2_0 *ta_hdr; 3480 struct amdgpu_device *adev = psp->adev; 3481 int err = 0; 3482 int ta_index = 0; 3483 3484 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3485 3486 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3487 return -EINVAL; 3488 3489 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3490 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3491 return -EINVAL; 3492 } 3493 3494 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3495 err = parse_ta_bin_descriptor(psp, 3496 &ta_hdr->ta_fw_bin[ta_index], 3497 ta_hdr); 3498 if (err) 3499 return err; 3500 } 3501 3502 return 0; 3503 } 3504 3505 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3506 { 3507 const struct common_firmware_header *hdr; 3508 struct amdgpu_device *adev = psp->adev; 3509 char fw_name[PSP_FW_NAME_LEN]; 3510 int err; 3511 3512 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 3513 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name); 3514 if (err) 3515 return err; 3516 3517 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3518 switch (le16_to_cpu(hdr->header_version_major)) { 3519 case 1: 3520 err = parse_ta_v1_microcode(psp); 3521 break; 3522 case 2: 3523 err = parse_ta_v2_microcode(psp); 3524 break; 3525 default: 3526 dev_err(adev->dev, "unsupported TA header version\n"); 3527 err = -EINVAL; 3528 } 3529 3530 if (err) 3531 amdgpu_ucode_release(&adev->psp.ta_fw); 
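/* on parse failure the TA firmware was released above, so no stale descriptors are left behind */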
3532 3533 return err; 3534 } 3535 3536 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3537 { 3538 struct amdgpu_device *adev = psp->adev; 3539 char fw_name[PSP_FW_NAME_LEN]; 3540 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3541 struct amdgpu_firmware_info *info = NULL; 3542 int err = 0; 3543 3544 if (!amdgpu_sriov_vf(adev)) { 3545 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3546 return -EINVAL; 3547 } 3548 3549 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); 3550 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name); 3551 if (err) { 3552 if (err == -ENODEV) { 3553 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3554 err = 0; 3555 goto out; 3556 } 3557 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3558 } 3559 3560 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3561 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3562 info->fw = adev->psp.cap_fw; 3563 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3564 adev->psp.cap_fw->data; 3565 adev->firmware.fw_size += ALIGN( 3566 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3567 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3568 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3569 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3570 3571 return 0; 3572 3573 out: 3574 amdgpu_ucode_release(&adev->psp.cap_fw); 3575 return err; 3576 } 3577 3578 static int psp_set_clockgating_state(void *handle, 3579 enum amd_clockgating_state state) 3580 { 3581 return 0; 3582 } 3583 3584 static int psp_set_powergating_state(void *handle, 3585 enum amd_powergating_state state) 3586 { 3587 return 0; 3588 } 3589 3590 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3591 struct device_attribute *attr, 3592 char *buf) 3593 { 3594 struct drm_device *ddev = dev_get_drvdata(dev); 3595 struct amdgpu_device *adev = drm_to_adev(ddev); 3596 uint32_t fw_ver; 3597 int ret; 3598 3599 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3600 DRM_INFO("PSP block is not ready yet."); 3601 return -EBUSY; 3602 } 3603 3604 mutex_lock(&adev->psp.mutex); 3605 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3606 mutex_unlock(&adev->psp.mutex); 3607 3608 if (ret) { 3609 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 3610 return ret; 3611 } 3612 3613 return sysfs_emit(buf, "%x\n", fw_ver); 3614 } 3615 3616 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3617 struct device_attribute *attr, 3618 const char *buf, 3619 size_t count) 3620 { 3621 struct drm_device *ddev = dev_get_drvdata(dev); 3622 struct amdgpu_device *adev = drm_to_adev(ddev); 3623 int ret, idx; 3624 char fw_name[100]; 3625 const struct firmware *usbc_pd_fw; 3626 struct amdgpu_bo *fw_buf_bo = NULL; 3627 uint64_t fw_pri_mc_addr; 3628 void *fw_pri_cpu_addr; 3629 3630 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3631 DRM_INFO("PSP block is not ready yet."); 3632 return -EBUSY; 3633 } 3634 3635 if (!drm_dev_enter(ddev, &idx)) 3636 return -ENODEV; 3637 3638 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 3639 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 3640 if (ret) 3641 goto fail; 3642 3643 /* LFB address which is aligned to 1MB boundary per PSP request */ 3644 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3645 AMDGPU_GEM_DOMAIN_VRAM | 3646 AMDGPU_GEM_DOMAIN_GTT, 3647 &fw_buf_bo, &fw_pri_mc_addr, 3648 
&fw_pri_cpu_addr); 3649 if (ret) 3650 goto rel_buf; 3651 3652 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3653 3654 mutex_lock(&adev->psp.mutex); 3655 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3656 mutex_unlock(&adev->psp.mutex); 3657 3658 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3659 3660 rel_buf: 3661 release_firmware(usbc_pd_fw); 3662 fail: 3663 if (ret) { 3664 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 3665 count = ret; 3666 } 3667 3668 drm_dev_exit(idx); 3669 return count; 3670 } 3671 3672 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3673 { 3674 int idx; 3675 3676 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3677 return; 3678 3679 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3680 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3681 3682 drm_dev_exit(idx); 3683 } 3684 3685 /** 3686 * DOC: usbc_pd_fw 3687 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 3688 * this file will trigger the update process. 3689 */ 3690 static DEVICE_ATTR(usbc_pd_fw, 0644, 3691 psp_usbc_pd_fw_sysfs_read, 3692 psp_usbc_pd_fw_sysfs_write); 3693 3694 int is_psp_fw_valid(struct psp_bin_desc bin) 3695 { 3696 return bin.size_bytes; 3697 } 3698 3699 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3700 struct bin_attribute *bin_attr, 3701 char *buffer, loff_t pos, size_t count) 3702 { 3703 struct device *dev = kobj_to_dev(kobj); 3704 struct drm_device *ddev = dev_get_drvdata(dev); 3705 struct amdgpu_device *adev = drm_to_adev(ddev); 3706 3707 adev->psp.vbflash_done = false; 3708 3709 /* Safeguard against memory drain */ 3710 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3711 dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B); 3712 kvfree(adev->psp.vbflash_tmp_buf); 3713 adev->psp.vbflash_tmp_buf = NULL; 3714 adev->psp.vbflash_image_size = 0; 3715 return -ENOMEM; 3716 } 3717 3718 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3719 if (!adev->psp.vbflash_tmp_buf) { 3720 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3721 if (!adev->psp.vbflash_tmp_buf) 3722 return -ENOMEM; 3723 } 3724 3725 mutex_lock(&adev->psp.mutex); 3726 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 3727 adev->psp.vbflash_image_size += count; 3728 mutex_unlock(&adev->psp.mutex); 3729 3730 dev_dbg(adev->dev, "IFWI staged for update"); 3731 3732 return count; 3733 } 3734 3735 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 3736 struct bin_attribute *bin_attr, char *buffer, 3737 loff_t pos, size_t count) 3738 { 3739 struct device *dev = kobj_to_dev(kobj); 3740 struct drm_device *ddev = dev_get_drvdata(dev); 3741 struct amdgpu_device *adev = drm_to_adev(ddev); 3742 struct amdgpu_bo *fw_buf_bo = NULL; 3743 uint64_t fw_pri_mc_addr; 3744 void *fw_pri_cpu_addr; 3745 int ret; 3746 3747 if (adev->psp.vbflash_image_size == 0) 3748 return -EINVAL; 3749 3750 dev_dbg(adev->dev, "PSP IFWI flash process initiated"); 3751 3752 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 3753 AMDGPU_GPU_PAGE_SIZE, 3754 AMDGPU_GEM_DOMAIN_VRAM, 3755 &fw_buf_bo, 3756 &fw_pri_mc_addr, 3757 &fw_pri_cpu_addr); 3758 if (ret) 3759 goto rel_buf; 3760 3761 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size); 3762 3763 mutex_lock(&adev->psp.mutex); 3764 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr); 3765 
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
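/*
 * Illustrative usage only (not part of the driver): assuming the flash
 * attribute group is visible for this device, an IFWI update could be driven
 * from user space roughly as follows (paths are system dependent and the
 * image name is a placeholder):
 *
 *   cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash   # stage
 *   cat /sys/class/drm/card0/device/psp_vbflash                    # trigger flash
 *   cat /sys/class/drm/card0/device/psp_vbflash_status             # 1 when done
 *
 * Writes only accumulate the image in vbflash_tmp_buf; the SPIROM update via
 * psp_update_spirom() is kicked off by the subsequent read.
 */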
static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};
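/*
 * Note: amdgpu_flash_attr_group is consumed elsewhere in the driver during
 * device bring-up. As a hedged sketch only (the actual registration site and
 * helper used by the driver may differ), exposing the group on the device
 * would look roughly like:
 *
 *	ret = devm_device_add_group(adev->dev, &amdgpu_flash_attr_group);
 *	if (ret)
 *		dev_err(adev->dev, "Failed to create flash sysfs group (%d)\n", ret);
 *
 * The is_visible/is_bin_visible callbacks above then decide, per attribute,
 * whether each node appears at all and with which mode (0660/0440), based on
 * psp.sup_pd_fw_up and psp.sup_ifwi_up.
 */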