/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B	(1024*1024*3)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 * - Load KDB
 * - Load SYS_DRV
 * - Load tOS
 * - Load PMFW
 * - Setup TMR
 * - Load other non-psp fw
 * - Load ASD
 * - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 * - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
		psp_v13_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
			psp->autoload_supported = false;
		}
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if runtime database doesn't exist or entry is invalid
 * or true if the specific database entry is found, and copy to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		return -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(cmd);
	cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}
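
/**
 * psp_wait_for - poll a PSP register until the expected condition is met
 * @psp: PSP context
 * @reg_index: offset of the register to poll
 * @reg_val: value to compare against (value that must change when
 *           @check_changed is true, masked value to match otherwise)
 * @mask: mask applied to the register value before the comparison
 * @check_changed: wait for the value to change instead of matching it
 *
 * Polls in 1us steps for up to adev->usec_timeout iterations.
 *
 * Return: 0 when the condition is met, -ETIME on timeout.
 */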
int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	default:
		return "UNKNOWN CMD";
	}
}
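
/*
 * Copy a GFX command into the PSP command buffer, submit it on the PSP
 * ring and busy-wait on the fence buffer until the PSP writes back the
 * fence index (or a RAS interrupt / timeout ends the wait). The PSP
 * response is copied back into @cmd->resp for the caller to inspect.
 */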
static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = 20000;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because gpu reset thread triggered and lock resource should
		 * be released for psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, psp response status is not 0 even when there is no
	 * problem while the command is submitted. Some version of PSP FW
	 * doesn't write 0 to that field.
	 * So here we would like to only print a warning instead of an error
	 * during psp initialization to avoid breaking hw_init, and we don't
	 * return -EINVAL in that case.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		dev_warn(psp->adev->dev,
			 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
			 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue LOAD TOC cmd to PSP to parse toc and calculate tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

static bool psp_boottime_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 6):
		return true;
	default:
		return false;
	}
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to HW engineer, they prefer the TMR address be "naturally
	 * aligned", e.g. the start address being an integer multiple of the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}
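
/* Query the PSP boot config and report whether GECC is currently enabled. */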
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for sriov now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}
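
/**
 * psp_ta_invoke - send a command to an already loaded trusted application
 * @psp: PSP context
 * @ta_cmd_id: TA-specific command id to execute
 * @context: TA context holding the session id returned by psp_ta_load()
 *
 * The TA exchanges its input/output through the shared memory buffer
 * attached to @context; the TA status is stored in @context->resp_status.
 */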
int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
			IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}
	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement for GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return 0;
}

int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);

	psp->ras_context.context.initialized = false;

	return ret;
}
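
/*
 * Load the RAS TA and set up the RAS session. When dynamic boot config is
 * supported, the GECC bit of the PSP boot config is queried first and, if
 * needed, toggled for the next boot cycle to match the requested UMC RAS
 * support.
 */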
int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
			dev_warn(adev->dev, "PSP get boot config failed\n");

		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
			if (!boot_cfg) {
				dev_info(adev->dev, "GECC is disabled\n");
			} else {
				/* disable GECC in next boot cycle if ras is
				 * disabled by module parameter amdgpu_ras_enable
				 * and/or amdgpu_ras_mask, or boot_config_get call
				 * fails
				 */
				ret = psp_boot_config_set(adev, 0);
				if (ret)
					dev_warn(adev->dev, "PSP set boot config failed\n");
				else
					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
			}
		} else {
			if (boot_cfg == 1) {
				dev_info(adev->dev, "GECC is enabled\n");
			} else {
				/* enable GECC in next boot cycle if it is disabled
				 * in boot config, or force enable GECC if failed to
				 * get boot configuration
				 */
				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
				if (ret)
					dev_warn(adev->dev, "PSP set boot config failed\n");
				else
					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
			}
		}
	}

	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->ras_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
		if (ret)
			return ret;
	}

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (amdgpu_ras_is_poison_mode_supported(adev))
		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
	ras_cmd->ras_in_message.init_flags.xcc_mask =
		adev->gfx.xcc_mask;
	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;

	ret = psp_ta_load(psp, &psp->ras_context.context);

	if (!ret && !ras_cmd->ras_status)
		psp->ras_context.context.initialized = true;
	else {
		if (ras_cmd->ras_status)
			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);

		/* fail to load RAS TA */
		psp->ras_context.context.initialized = false;
	}

	return ret;
}
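
/*
 * Ask the RAS TA to inject an error. @instance_mask selects the target
 * instances; it is translated to a device mask and packed into the upper
 * bits of sub_block_index (AMDGPU_RAS_INST_MASK) for backward compatibility.
 */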
int psp_ras_trigger_error(struct psp_context *psp,
			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
	struct ta_ras_shared_memory *ras_cmd;
	struct amdgpu_device *adev = psp->adev;
	int ret;
	uint32_t dev_mask;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	switch (info->block_id) {
	case TA_RAS_BLOCK__GFX:
		dev_mask = GET_MASK(GC, instance_mask);
		break;
	case TA_RAS_BLOCK__SDMA:
		dev_mask = GET_MASK(SDMA0, instance_mask);
		break;
	case TA_RAS_BLOCK__VCN:
	case TA_RAS_BLOCK__JPEG:
		dev_mask = GET_MASK(VCN, instance_mask);
		break;
	default:
		dev_mask = instance_mask;
		break;
	}

	/* reuse sub_block_index for backward compatibility */
	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
	dev_mask &= AMDGPU_RAS_INST_MASK;
	info->sub_block_index |= dev_mask;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
	ras_cmd->ras_in_message.trigger_error = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs, the error injection was successful,
	 * but the return status from the TA is no longer reliable
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_cmd->ras_status)
		return -EINVAL;

	return 0;
}

int psp_ras_query_address(struct psp_context *psp,
			  struct ta_ras_query_address_input *addr_in,
			  struct ta_ras_query_address_output *addr_out)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
	ras_cmd->ras_in_message.address = *addr_in;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
		return -EINVAL;

	*addr_out = ras_cmd->ras_out_message.address;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
	    !psp->hdcp_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->hdcp_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->hdcp_context.context);
	if (!ret) {
		psp->hdcp_context.context.initialized = true;
		mutex_init(&psp->hdcp_context.mutex);
	}

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}

static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->hdcp_context.context);

	psp->hdcp_context.context.initialized = false;

	return ret;
}
// HDCP end

// DTM start
static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.context.bin_desc.size_bytes ||
	    !psp->dtm_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 1897 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1898 1899 if (!psp->dtm_context.context.mem_context.shared_buf) { 1900 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 1901 if (ret) 1902 return ret; 1903 } 1904 1905 ret = psp_ta_load(psp, &psp->dtm_context.context); 1906 if (!ret) { 1907 psp->dtm_context.context.initialized = true; 1908 mutex_init(&psp->dtm_context.mutex); 1909 } 1910 1911 return ret; 1912 } 1913 1914 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1915 { 1916 /* 1917 * TODO: bypass the loading in sriov for now 1918 */ 1919 if (amdgpu_sriov_vf(psp->adev)) 1920 return 0; 1921 1922 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 1923 } 1924 1925 static int psp_dtm_terminate(struct psp_context *psp) 1926 { 1927 int ret; 1928 1929 /* 1930 * TODO: bypass the terminate in sriov for now 1931 */ 1932 if (amdgpu_sriov_vf(psp->adev)) 1933 return 0; 1934 1935 if (!psp->dtm_context.context.initialized) 1936 return 0; 1937 1938 ret = psp_ta_unload(psp, &psp->dtm_context.context); 1939 1940 psp->dtm_context.context.initialized = false; 1941 1942 return ret; 1943 } 1944 // DTM end 1945 1946 // RAP start 1947 static int psp_rap_initialize(struct psp_context *psp) 1948 { 1949 int ret; 1950 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 1951 1952 /* 1953 * TODO: bypass the initialize in sriov for now 1954 */ 1955 if (amdgpu_sriov_vf(psp->adev)) 1956 return 0; 1957 1958 if (!psp->rap_context.context.bin_desc.size_bytes || 1959 !psp->rap_context.context.bin_desc.start_addr) { 1960 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1961 return 0; 1962 } 1963 1964 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 1965 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1966 1967 if (!psp->rap_context.context.mem_context.shared_buf) { 1968 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 1969 if (ret) 1970 return ret; 1971 } 1972 1973 ret = psp_ta_load(psp, &psp->rap_context.context); 1974 if (!ret) { 1975 psp->rap_context.context.initialized = true; 1976 mutex_init(&psp->rap_context.mutex); 1977 } else 1978 return ret; 1979 1980 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 1981 if (ret || status != TA_RAP_STATUS__SUCCESS) { 1982 psp_rap_terminate(psp); 1983 /* free rap shared memory */ 1984 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 1985 1986 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 1987 ret, status); 1988 1989 return ret; 1990 } 1991 1992 return 0; 1993 } 1994 1995 static int psp_rap_terminate(struct psp_context *psp) 1996 { 1997 int ret; 1998 1999 if (!psp->rap_context.context.initialized) 2000 return 0; 2001 2002 ret = psp_ta_unload(psp, &psp->rap_context.context); 2003 2004 psp->rap_context.context.initialized = false; 2005 2006 return ret; 2007 } 2008 2009 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2010 { 2011 struct ta_rap_shared_memory *rap_cmd; 2012 int ret = 0; 2013 2014 if (!psp->rap_context.context.initialized) 2015 return 0; 2016 2017 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2018 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2019 return -EINVAL; 2020 2021 mutex_lock(&psp->rap_context.mutex); 2022 2023 rap_cmd = (struct ta_rap_shared_memory *) 2024 psp->rap_context.context.mem_context.shared_buf; 2025 memset(rap_cmd, 0, sizeof(struct 
ta_rap_shared_memory)); 2026 2027 rap_cmd->cmd_id = ta_cmd_id; 2028 rap_cmd->validation_method_id = METHOD_A; 2029 2030 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2031 if (ret) 2032 goto out_unlock; 2033 2034 if (status) 2035 *status = rap_cmd->rap_status; 2036 2037 out_unlock: 2038 mutex_unlock(&psp->rap_context.mutex); 2039 2040 return ret; 2041 } 2042 // RAP end 2043 2044 /* securedisplay start */ 2045 static int psp_securedisplay_initialize(struct psp_context *psp) 2046 { 2047 int ret; 2048 struct ta_securedisplay_cmd *securedisplay_cmd; 2049 2050 /* 2051 * TODO: bypass the initialize in sriov for now 2052 */ 2053 if (amdgpu_sriov_vf(psp->adev)) 2054 return 0; 2055 2056 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2057 !psp->securedisplay_context.context.bin_desc.start_addr) { 2058 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2059 return 0; 2060 } 2061 2062 psp->securedisplay_context.context.mem_context.shared_mem_size = 2063 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2064 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2065 2066 if (!psp->securedisplay_context.context.initialized) { 2067 ret = psp_ta_init_shared_buf(psp, 2068 &psp->securedisplay_context.context.mem_context); 2069 if (ret) 2070 return ret; 2071 } 2072 2073 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2074 if (!ret) { 2075 psp->securedisplay_context.context.initialized = true; 2076 mutex_init(&psp->securedisplay_context.mutex); 2077 } else 2078 return ret; 2079 2080 mutex_lock(&psp->securedisplay_context.mutex); 2081 2082 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2083 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2084 2085 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2086 2087 mutex_unlock(&psp->securedisplay_context.mutex); 2088 2089 if (ret) { 2090 psp_securedisplay_terminate(psp); 2091 /* free securedisplay shared memory */ 2092 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2093 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2094 return -EINVAL; 2095 } 2096 2097 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2098 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2099 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. 
ret 0x%x\n", 2100 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2101 /* don't try again */ 2102 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2103 } 2104 2105 return 0; 2106 } 2107 2108 static int psp_securedisplay_terminate(struct psp_context *psp) 2109 { 2110 int ret; 2111 2112 /* 2113 * TODO:bypass the terminate in sriov for now 2114 */ 2115 if (amdgpu_sriov_vf(psp->adev)) 2116 return 0; 2117 2118 if (!psp->securedisplay_context.context.initialized) 2119 return 0; 2120 2121 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2122 2123 psp->securedisplay_context.context.initialized = false; 2124 2125 return ret; 2126 } 2127 2128 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2129 { 2130 int ret; 2131 2132 if (!psp->securedisplay_context.context.initialized) 2133 return -EINVAL; 2134 2135 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2136 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) 2137 return -EINVAL; 2138 2139 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2140 2141 return ret; 2142 } 2143 /* SECUREDISPLAY end */ 2144 2145 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2146 { 2147 struct psp_context *psp = &adev->psp; 2148 int ret = 0; 2149 2150 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2151 ret = psp->funcs->wait_for_bootloader(psp); 2152 2153 return ret; 2154 } 2155 2156 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2157 { 2158 if (psp->funcs && 2159 psp->funcs->get_ras_capability) { 2160 return psp->funcs->get_ras_capability(psp); 2161 } else { 2162 return false; 2163 } 2164 } 2165 2166 static int psp_hw_start(struct psp_context *psp) 2167 { 2168 struct amdgpu_device *adev = psp->adev; 2169 int ret; 2170 2171 if (!amdgpu_sriov_vf(adev)) { 2172 if ((is_psp_fw_valid(psp->kdb)) && 2173 (psp->funcs->bootloader_load_kdb != NULL)) { 2174 ret = psp_bootloader_load_kdb(psp); 2175 if (ret) { 2176 dev_err(adev->dev, "PSP load kdb failed!\n"); 2177 return ret; 2178 } 2179 } 2180 2181 if ((is_psp_fw_valid(psp->spl)) && 2182 (psp->funcs->bootloader_load_spl != NULL)) { 2183 ret = psp_bootloader_load_spl(psp); 2184 if (ret) { 2185 dev_err(adev->dev, "PSP load spl failed!\n"); 2186 return ret; 2187 } 2188 } 2189 2190 if ((is_psp_fw_valid(psp->sys)) && 2191 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2192 ret = psp_bootloader_load_sysdrv(psp); 2193 if (ret) { 2194 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2195 return ret; 2196 } 2197 } 2198 2199 if ((is_psp_fw_valid(psp->soc_drv)) && 2200 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2201 ret = psp_bootloader_load_soc_drv(psp); 2202 if (ret) { 2203 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2204 return ret; 2205 } 2206 } 2207 2208 if ((is_psp_fw_valid(psp->intf_drv)) && 2209 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2210 ret = psp_bootloader_load_intf_drv(psp); 2211 if (ret) { 2212 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2213 return ret; 2214 } 2215 } 2216 2217 if ((is_psp_fw_valid(psp->dbg_drv)) && 2218 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2219 ret = psp_bootloader_load_dbg_drv(psp); 2220 if (ret) { 2221 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2222 return ret; 2223 } 2224 } 2225 2226 if ((is_psp_fw_valid(psp->ras_drv)) && 2227 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2228 ret = psp_bootloader_load_ras_drv(psp); 2229 if (ret) { 2230 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2231 
return ret; 2232 } 2233 } 2234 2235 if ((is_psp_fw_valid(psp->sos)) && 2236 (psp->funcs->bootloader_load_sos != NULL)) { 2237 ret = psp_bootloader_load_sos(psp); 2238 if (ret) { 2239 dev_err(adev->dev, "PSP load sos failed!\n"); 2240 return ret; 2241 } 2242 } 2243 } 2244 2245 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2246 if (ret) { 2247 dev_err(adev->dev, "PSP create ring failed!\n"); 2248 return ret; 2249 } 2250 2251 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2252 goto skip_pin_bo; 2253 2254 if (!psp_boottime_tmr(psp)) { 2255 ret = psp_tmr_init(psp); 2256 if (ret) { 2257 dev_err(adev->dev, "PSP tmr init failed!\n"); 2258 return ret; 2259 } 2260 } 2261 2262 skip_pin_bo: 2263 /* 2264 * For ASICs with DF Cstate management centralized 2265 * to PMFW, TMR setup should be performed after PMFW 2266 * loaded and before other non-psp firmware loaded. 2267 */ 2268 if (psp->pmfw_centralized_cstate_management) { 2269 ret = psp_load_smu_fw(psp); 2270 if (ret) 2271 return ret; 2272 } 2273 2274 ret = psp_tmr_load(psp); 2275 if (ret) { 2276 dev_err(adev->dev, "PSP load tmr failed!\n"); 2277 return ret; 2278 } 2279 2280 return 0; 2281 } 2282 2283 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2284 enum psp_gfx_fw_type *type) 2285 { 2286 switch (ucode->ucode_id) { 2287 case AMDGPU_UCODE_ID_CAP: 2288 *type = GFX_FW_TYPE_CAP; 2289 break; 2290 case AMDGPU_UCODE_ID_SDMA0: 2291 *type = GFX_FW_TYPE_SDMA0; 2292 break; 2293 case AMDGPU_UCODE_ID_SDMA1: 2294 *type = GFX_FW_TYPE_SDMA1; 2295 break; 2296 case AMDGPU_UCODE_ID_SDMA2: 2297 *type = GFX_FW_TYPE_SDMA2; 2298 break; 2299 case AMDGPU_UCODE_ID_SDMA3: 2300 *type = GFX_FW_TYPE_SDMA3; 2301 break; 2302 case AMDGPU_UCODE_ID_SDMA4: 2303 *type = GFX_FW_TYPE_SDMA4; 2304 break; 2305 case AMDGPU_UCODE_ID_SDMA5: 2306 *type = GFX_FW_TYPE_SDMA5; 2307 break; 2308 case AMDGPU_UCODE_ID_SDMA6: 2309 *type = GFX_FW_TYPE_SDMA6; 2310 break; 2311 case AMDGPU_UCODE_ID_SDMA7: 2312 *type = GFX_FW_TYPE_SDMA7; 2313 break; 2314 case AMDGPU_UCODE_ID_CP_MES: 2315 *type = GFX_FW_TYPE_CP_MES; 2316 break; 2317 case AMDGPU_UCODE_ID_CP_MES_DATA: 2318 *type = GFX_FW_TYPE_MES_STACK; 2319 break; 2320 case AMDGPU_UCODE_ID_CP_MES1: 2321 *type = GFX_FW_TYPE_CP_MES_KIQ; 2322 break; 2323 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2324 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2325 break; 2326 case AMDGPU_UCODE_ID_CP_CE: 2327 *type = GFX_FW_TYPE_CP_CE; 2328 break; 2329 case AMDGPU_UCODE_ID_CP_PFP: 2330 *type = GFX_FW_TYPE_CP_PFP; 2331 break; 2332 case AMDGPU_UCODE_ID_CP_ME: 2333 *type = GFX_FW_TYPE_CP_ME; 2334 break; 2335 case AMDGPU_UCODE_ID_CP_MEC1: 2336 *type = GFX_FW_TYPE_CP_MEC; 2337 break; 2338 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2339 *type = GFX_FW_TYPE_CP_MEC_ME1; 2340 break; 2341 case AMDGPU_UCODE_ID_CP_MEC2: 2342 *type = GFX_FW_TYPE_CP_MEC; 2343 break; 2344 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2345 *type = GFX_FW_TYPE_CP_MEC_ME2; 2346 break; 2347 case AMDGPU_UCODE_ID_RLC_P: 2348 *type = GFX_FW_TYPE_RLC_P; 2349 break; 2350 case AMDGPU_UCODE_ID_RLC_V: 2351 *type = GFX_FW_TYPE_RLC_V; 2352 break; 2353 case AMDGPU_UCODE_ID_RLC_G: 2354 *type = GFX_FW_TYPE_RLC_G; 2355 break; 2356 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2357 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2358 break; 2359 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2360 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2361 break; 2362 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2363 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2364 break; 2365 case AMDGPU_UCODE_ID_RLC_IRAM: 2366 *type = GFX_FW_TYPE_RLC_IRAM; 2367 break; 
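	/* The remaining cases map RLC DRAM and tap-delay images, SMU and
	 * PPTABLE, media firmware (UVD/VCE/VCN/DMCU/DMCUB), SDMA, IMU,
	 * RS64 CP images and stacks, VPE, UMSCH and the P2S table onto
	 * their PSP GFX firmware types; unknown IDs fall through to
	 * -EINVAL.
	 */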
2368 case AMDGPU_UCODE_ID_RLC_DRAM: 2369 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2370 break; 2371 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2372 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2373 break; 2374 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2375 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2376 break; 2377 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2378 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2379 break; 2380 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2381 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2382 break; 2383 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2384 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2385 break; 2386 case AMDGPU_UCODE_ID_SMC: 2387 *type = GFX_FW_TYPE_SMU; 2388 break; 2389 case AMDGPU_UCODE_ID_PPTABLE: 2390 *type = GFX_FW_TYPE_PPTABLE; 2391 break; 2392 case AMDGPU_UCODE_ID_UVD: 2393 *type = GFX_FW_TYPE_UVD; 2394 break; 2395 case AMDGPU_UCODE_ID_UVD1: 2396 *type = GFX_FW_TYPE_UVD1; 2397 break; 2398 case AMDGPU_UCODE_ID_VCE: 2399 *type = GFX_FW_TYPE_VCE; 2400 break; 2401 case AMDGPU_UCODE_ID_VCN: 2402 *type = GFX_FW_TYPE_VCN; 2403 break; 2404 case AMDGPU_UCODE_ID_VCN1: 2405 *type = GFX_FW_TYPE_VCN1; 2406 break; 2407 case AMDGPU_UCODE_ID_DMCU_ERAM: 2408 *type = GFX_FW_TYPE_DMCU_ERAM; 2409 break; 2410 case AMDGPU_UCODE_ID_DMCU_INTV: 2411 *type = GFX_FW_TYPE_DMCU_ISR; 2412 break; 2413 case AMDGPU_UCODE_ID_VCN0_RAM: 2414 *type = GFX_FW_TYPE_VCN0_RAM; 2415 break; 2416 case AMDGPU_UCODE_ID_VCN1_RAM: 2417 *type = GFX_FW_TYPE_VCN1_RAM; 2418 break; 2419 case AMDGPU_UCODE_ID_DMCUB: 2420 *type = GFX_FW_TYPE_DMUB; 2421 break; 2422 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2423 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2424 break; 2425 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2426 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2427 break; 2428 case AMDGPU_UCODE_ID_IMU_I: 2429 *type = GFX_FW_TYPE_IMU_I; 2430 break; 2431 case AMDGPU_UCODE_ID_IMU_D: 2432 *type = GFX_FW_TYPE_IMU_D; 2433 break; 2434 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2435 *type = GFX_FW_TYPE_RS64_PFP; 2436 break; 2437 case AMDGPU_UCODE_ID_CP_RS64_ME: 2438 *type = GFX_FW_TYPE_RS64_ME; 2439 break; 2440 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2441 *type = GFX_FW_TYPE_RS64_MEC; 2442 break; 2443 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2444 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2445 break; 2446 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2447 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2448 break; 2449 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2450 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2451 break; 2452 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2453 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2454 break; 2455 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2456 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2457 break; 2458 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2459 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2460 break; 2461 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2462 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2463 break; 2464 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2465 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2466 break; 2467 case AMDGPU_UCODE_ID_VPE_CTX: 2468 *type = GFX_FW_TYPE_VPEC_FW1; 2469 break; 2470 case AMDGPU_UCODE_ID_VPE_CTL: 2471 *type = GFX_FW_TYPE_VPEC_FW2; 2472 break; 2473 case AMDGPU_UCODE_ID_VPE: 2474 *type = GFX_FW_TYPE_VPE; 2475 break; 2476 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2477 *type = GFX_FW_TYPE_UMSCH_UCODE; 2478 break; 2479 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2480 *type = GFX_FW_TYPE_UMSCH_DATA; 2481 break; 2482 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2483 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2484 break; 2485 case AMDGPU_UCODE_ID_P2S_TABLE: 2486 *type = GFX_FW_TYPE_P2S_TABLE; 2487 break; 2488 case 
AMDGPU_UCODE_ID_MAXIMUM: 2489 default: 2490 return -EINVAL; 2491 } 2492 2493 return 0; 2494 } 2495 2496 static void psp_print_fw_hdr(struct psp_context *psp, 2497 struct amdgpu_firmware_info *ucode) 2498 { 2499 struct amdgpu_device *adev = psp->adev; 2500 struct common_firmware_header *hdr; 2501 2502 switch (ucode->ucode_id) { 2503 case AMDGPU_UCODE_ID_SDMA0: 2504 case AMDGPU_UCODE_ID_SDMA1: 2505 case AMDGPU_UCODE_ID_SDMA2: 2506 case AMDGPU_UCODE_ID_SDMA3: 2507 case AMDGPU_UCODE_ID_SDMA4: 2508 case AMDGPU_UCODE_ID_SDMA5: 2509 case AMDGPU_UCODE_ID_SDMA6: 2510 case AMDGPU_UCODE_ID_SDMA7: 2511 hdr = (struct common_firmware_header *) 2512 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2513 amdgpu_ucode_print_sdma_hdr(hdr); 2514 break; 2515 case AMDGPU_UCODE_ID_CP_CE: 2516 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2517 amdgpu_ucode_print_gfx_hdr(hdr); 2518 break; 2519 case AMDGPU_UCODE_ID_CP_PFP: 2520 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2521 amdgpu_ucode_print_gfx_hdr(hdr); 2522 break; 2523 case AMDGPU_UCODE_ID_CP_ME: 2524 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2525 amdgpu_ucode_print_gfx_hdr(hdr); 2526 break; 2527 case AMDGPU_UCODE_ID_CP_MEC1: 2528 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2529 amdgpu_ucode_print_gfx_hdr(hdr); 2530 break; 2531 case AMDGPU_UCODE_ID_RLC_G: 2532 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2533 amdgpu_ucode_print_rlc_hdr(hdr); 2534 break; 2535 case AMDGPU_UCODE_ID_SMC: 2536 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2537 amdgpu_ucode_print_smc_hdr(hdr); 2538 break; 2539 default: 2540 break; 2541 } 2542 } 2543 2544 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2545 struct amdgpu_firmware_info *ucode, 2546 struct psp_gfx_cmd_resp *cmd) 2547 { 2548 int ret; 2549 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2550 2551 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2552 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2553 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2554 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2555 2556 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2557 if (ret) 2558 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2559 2560 return ret; 2561 } 2562 2563 int psp_execute_ip_fw_load(struct psp_context *psp, 2564 struct amdgpu_firmware_info *ucode) 2565 { 2566 int ret = 0; 2567 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2568 2569 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2570 if (!ret) { 2571 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2572 psp->fence_buf_mc_addr); 2573 } 2574 2575 release_psp_cmd_buf(psp); 2576 2577 return ret; 2578 } 2579 2580 static int psp_load_p2s_table(struct psp_context *psp) 2581 { 2582 int ret; 2583 struct amdgpu_device *adev = psp->adev; 2584 struct amdgpu_firmware_info *ucode = 2585 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2586 2587 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2588 return 0; 2589 2590 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { 2591 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 
0x0036013D : 2592 0x0036003C; 2593 if (psp->sos.fw_version < supp_vers) 2594 return 0; 2595 } 2596 2597 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2598 return 0; 2599 2600 ret = psp_execute_ip_fw_load(psp, ucode); 2601 2602 return ret; 2603 } 2604 2605 static int psp_load_smu_fw(struct psp_context *psp) 2606 { 2607 int ret; 2608 struct amdgpu_device *adev = psp->adev; 2609 struct amdgpu_firmware_info *ucode = 2610 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2611 struct amdgpu_ras *ras = psp->ras_context.ras; 2612 2613 /* 2614 * Skip SMU FW reloading in case of using BACO for runpm only, 2615 * as SMU is always alive. 2616 */ 2617 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2618 return 0; 2619 2620 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2621 return 0; 2622 2623 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2624 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2625 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2626 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2627 if (ret) 2628 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2629 } 2630 2631 ret = psp_execute_ip_fw_load(psp, ucode); 2632 2633 if (ret) 2634 dev_err(adev->dev, "PSP load smu failed!\n"); 2635 2636 return ret; 2637 } 2638 2639 static bool fw_load_skip_check(struct psp_context *psp, 2640 struct amdgpu_firmware_info *ucode) 2641 { 2642 if (!ucode->fw || !ucode->ucode_size) 2643 return true; 2644 2645 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2646 return true; 2647 2648 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2649 (psp_smu_reload_quirk(psp) || 2650 psp->autoload_supported || 2651 psp->pmfw_centralized_cstate_management)) 2652 return true; 2653 2654 if (amdgpu_sriov_vf(psp->adev) && 2655 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2656 return true; 2657 2658 if (psp->autoload_supported && 2659 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2660 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2661 /* skip mec JT when autoload is enabled */ 2662 return true; 2663 2664 return false; 2665 } 2666 2667 int psp_load_fw_list(struct psp_context *psp, 2668 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2669 { 2670 int ret = 0, i; 2671 struct amdgpu_firmware_info *ucode; 2672 2673 for (i = 0; i < ucode_count; ++i) { 2674 ucode = ucode_list[i]; 2675 psp_print_fw_hdr(psp, ucode); 2676 ret = psp_execute_ip_fw_load(psp, ucode); 2677 if (ret) 2678 return ret; 2679 } 2680 return ret; 2681 } 2682 2683 static int psp_load_non_psp_fw(struct psp_context *psp) 2684 { 2685 int i, ret; 2686 struct amdgpu_firmware_info *ucode; 2687 struct amdgpu_device *adev = psp->adev; 2688 2689 if (psp->autoload_supported && 2690 !psp->pmfw_centralized_cstate_management) { 2691 ret = psp_load_smu_fw(psp); 2692 if (ret) 2693 return ret; 2694 } 2695 2696 /* Load P2S table first if it's available */ 2697 psp_load_p2s_table(psp); 2698 2699 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2700 ucode = &adev->firmware.ucode[i]; 2701 2702 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2703 !fw_load_skip_check(psp, ucode)) { 2704 ret = psp_load_smu_fw(psp); 2705 if (ret) 2706 return ret; 2707 continue; 2708 } 2709 2710 if (fw_load_skip_check(psp, ucode)) 2711 continue; 2712 2713 if (psp->autoload_supported && 2714 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2715 IP_VERSION(11, 0, 7) || 2716 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2717 IP_VERSION(11, 0, 11) || 2718 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2719 IP_VERSION(11, 0, 
	     12)) &&
	    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
	     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
	     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receives one SDMA firmware for sienna_cichlid,
			 * as all four SDMA firmware images are the same.
			 */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_ip_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start RLC autoload after PSP has received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload_start(psp);
			if (ret) {
				dev_err(adev->dev, "Failed to start rlc autoload\n");
				return ret;
			}
		}
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		/* should not destroy ring, only stop */
		psp_ring_stop(psp, PSP_RING_TYPE__KM);
	} else {
		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
		if (ret) {
			dev_err(adev->dev, "PSP ring init failed!\n");
			goto failed;
		}
	}

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed1;

	ret = psp_asd_initialize(psp);
	if (ret) {
		dev_err(adev->dev, "PSP load asd failed!\n");
		goto failed1;
	}

	ret = psp_rl_load(adev);
	if (ret) {
		dev_err(adev->dev, "PSP load RL failed!\n");
		goto failed1;
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes > 1) {
			ret = psp_xgmi_initialize(psp, false, true);
			/* Warn on XGMI session initialization failure
			 * instead of stopping driver initialization.
			 */
			if (ret)
				dev_err(psp->adev->dev,
					"XGMI: Failed to initialize XGMI session\n");
		}
	}

	if (psp->ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");

		ret = psp_securedisplay_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
	}

	return 0;

failed1:
	psp_free_shared_bufs(psp);
failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini.
	 */
	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is just used on hw_init only once, no need on
	 * resume.
2848 */ 2849 ret = amdgpu_ucode_init_bo(adev); 2850 if (ret) 2851 goto failed; 2852 2853 ret = psp_load_fw(adev); 2854 if (ret) { 2855 dev_err(adev->dev, "PSP firmware loading failed\n"); 2856 goto failed; 2857 } 2858 2859 mutex_unlock(&adev->firmware.mutex); 2860 return 0; 2861 2862 failed: 2863 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2864 mutex_unlock(&adev->firmware.mutex); 2865 return -EINVAL; 2866 } 2867 2868 static int psp_hw_fini(void *handle) 2869 { 2870 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2871 struct psp_context *psp = &adev->psp; 2872 2873 if (psp->ta_fw) { 2874 psp_ras_terminate(psp); 2875 psp_securedisplay_terminate(psp); 2876 psp_rap_terminate(psp); 2877 psp_dtm_terminate(psp); 2878 psp_hdcp_terminate(psp); 2879 2880 if (adev->gmc.xgmi.num_physical_nodes > 1) 2881 psp_xgmi_terminate(psp); 2882 } 2883 2884 psp_asd_terminate(psp); 2885 psp_tmr_terminate(psp); 2886 2887 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2888 2889 return 0; 2890 } 2891 2892 static int psp_suspend(void *handle) 2893 { 2894 int ret = 0; 2895 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2896 struct psp_context *psp = &adev->psp; 2897 2898 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2899 psp->xgmi_context.context.initialized) { 2900 ret = psp_xgmi_terminate(psp); 2901 if (ret) { 2902 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 2903 goto out; 2904 } 2905 } 2906 2907 if (psp->ta_fw) { 2908 ret = psp_ras_terminate(psp); 2909 if (ret) { 2910 dev_err(adev->dev, "Failed to terminate ras ta\n"); 2911 goto out; 2912 } 2913 ret = psp_hdcp_terminate(psp); 2914 if (ret) { 2915 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 2916 goto out; 2917 } 2918 ret = psp_dtm_terminate(psp); 2919 if (ret) { 2920 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 2921 goto out; 2922 } 2923 ret = psp_rap_terminate(psp); 2924 if (ret) { 2925 dev_err(adev->dev, "Failed to terminate rap ta\n"); 2926 goto out; 2927 } 2928 ret = psp_securedisplay_terminate(psp); 2929 if (ret) { 2930 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 2931 goto out; 2932 } 2933 } 2934 2935 ret = psp_asd_terminate(psp); 2936 if (ret) { 2937 dev_err(adev->dev, "Failed to terminate asd\n"); 2938 goto out; 2939 } 2940 2941 ret = psp_tmr_terminate(psp); 2942 if (ret) { 2943 dev_err(adev->dev, "Failed to terminate tmr\n"); 2944 goto out; 2945 } 2946 2947 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2948 if (ret) 2949 dev_err(adev->dev, "PSP ring stop failed\n"); 2950 2951 out: 2952 return ret; 2953 } 2954 2955 static int psp_resume(void *handle) 2956 { 2957 int ret; 2958 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2959 struct psp_context *psp = &adev->psp; 2960 2961 dev_info(adev->dev, "PSP is resuming...\n"); 2962 2963 if (psp->mem_train_ctx.enable_mem_training) { 2964 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2965 if (ret) { 2966 dev_err(adev->dev, "Failed to process memory training!\n"); 2967 return ret; 2968 } 2969 } 2970 2971 mutex_lock(&adev->firmware.mutex); 2972 2973 ret = psp_hw_start(psp); 2974 if (ret) 2975 goto failed; 2976 2977 ret = psp_load_non_psp_fw(psp); 2978 if (ret) 2979 goto failed; 2980 2981 ret = psp_asd_initialize(psp); 2982 if (ret) { 2983 dev_err(adev->dev, "PSP load asd failed!\n"); 2984 goto failed; 2985 } 2986 2987 ret = psp_rl_load(adev); 2988 if (ret) { 2989 dev_err(adev->dev, "PSP load RL failed!\n"); 2990 goto failed; 2991 } 2992 2993 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2994 ret = psp_xgmi_initialize(psp, false, true); 
2995 /* Warning the XGMI seesion initialize failure 2996 * Instead of stop driver initialization 2997 */ 2998 if (ret) 2999 dev_err(psp->adev->dev, 3000 "XGMI: Failed to initialize XGMI session\n"); 3001 } 3002 3003 if (psp->ta_fw) { 3004 ret = psp_ras_initialize(psp); 3005 if (ret) 3006 dev_err(psp->adev->dev, 3007 "RAS: Failed to initialize RAS\n"); 3008 3009 ret = psp_hdcp_initialize(psp); 3010 if (ret) 3011 dev_err(psp->adev->dev, 3012 "HDCP: Failed to initialize HDCP\n"); 3013 3014 ret = psp_dtm_initialize(psp); 3015 if (ret) 3016 dev_err(psp->adev->dev, 3017 "DTM: Failed to initialize DTM\n"); 3018 3019 ret = psp_rap_initialize(psp); 3020 if (ret) 3021 dev_err(psp->adev->dev, 3022 "RAP: Failed to initialize RAP\n"); 3023 3024 ret = psp_securedisplay_initialize(psp); 3025 if (ret) 3026 dev_err(psp->adev->dev, 3027 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3028 } 3029 3030 mutex_unlock(&adev->firmware.mutex); 3031 3032 return 0; 3033 3034 failed: 3035 dev_err(adev->dev, "PSP resume failed\n"); 3036 mutex_unlock(&adev->firmware.mutex); 3037 return ret; 3038 } 3039 3040 int psp_gpu_reset(struct amdgpu_device *adev) 3041 { 3042 int ret; 3043 3044 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3045 return 0; 3046 3047 mutex_lock(&adev->psp.mutex); 3048 ret = psp_mode1_reset(&adev->psp); 3049 mutex_unlock(&adev->psp.mutex); 3050 3051 return ret; 3052 } 3053 3054 int psp_rlc_autoload_start(struct psp_context *psp) 3055 { 3056 int ret; 3057 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3058 3059 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3060 3061 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3062 psp->fence_buf_mc_addr); 3063 3064 release_psp_cmd_buf(psp); 3065 3066 return ret; 3067 } 3068 3069 int psp_ring_cmd_submit(struct psp_context *psp, 3070 uint64_t cmd_buf_mc_addr, 3071 uint64_t fence_mc_addr, 3072 int index) 3073 { 3074 unsigned int psp_write_ptr_reg = 0; 3075 struct psp_gfx_rb_frame *write_frame; 3076 struct psp_ring *ring = &psp->km_ring; 3077 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3078 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3079 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3080 struct amdgpu_device *adev = psp->adev; 3081 uint32_t ring_size_dw = ring->ring_size / 4; 3082 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3083 3084 /* KM (GPCOM) prepare write pointer */ 3085 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3086 3087 /* Update KM RB frame pointer to new frame */ 3088 /* write_frame ptr increments by size of rb_frame in bytes */ 3089 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3090 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3091 write_frame = ring_buffer_start; 3092 else 3093 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3094 /* Check invalid write_frame ptr address */ 3095 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3096 dev_err(adev->dev, 3097 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3098 ring_buffer_start, ring_buffer_end, write_frame); 3099 dev_err(adev->dev, 3100 "write_frame is pointing to address out of bounds\n"); 3101 return -EINVAL; 3102 } 3103 3104 /* Initialize KM RB frame */ 3105 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3106 3107 /* Update KM RB frame */ 3108 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3109 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3110 write_frame->fence_addr_hi = 
upper_32_bits(fence_mc_addr); 3111 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3112 write_frame->fence_value = index; 3113 amdgpu_device_flush_hdp(adev, NULL); 3114 3115 /* Update the write Pointer in DWORDs */ 3116 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3117 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3118 return 0; 3119 } 3120 3121 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3122 { 3123 struct amdgpu_device *adev = psp->adev; 3124 char fw_name[PSP_FW_NAME_LEN]; 3125 const struct psp_firmware_header_v1_0 *asd_hdr; 3126 int err = 0; 3127 3128 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 3129 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); 3130 if (err) 3131 goto out; 3132 3133 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3134 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3135 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3136 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3137 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3138 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3139 return 0; 3140 out: 3141 amdgpu_ucode_release(&adev->psp.asd_fw); 3142 return err; 3143 } 3144 3145 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3146 { 3147 struct amdgpu_device *adev = psp->adev; 3148 char fw_name[PSP_FW_NAME_LEN]; 3149 const struct psp_firmware_header_v1_0 *toc_hdr; 3150 int err = 0; 3151 3152 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); 3153 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); 3154 if (err) 3155 goto out; 3156 3157 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3158 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3159 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3160 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3161 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3162 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3163 return 0; 3164 out: 3165 amdgpu_ucode_release(&adev->psp.toc_fw); 3166 return err; 3167 } 3168 3169 static int parse_sos_bin_descriptor(struct psp_context *psp, 3170 const struct psp_fw_bin_desc *desc, 3171 const struct psp_firmware_header_v2_0 *sos_hdr) 3172 { 3173 uint8_t *ucode_start_addr = NULL; 3174 3175 if (!psp || !desc || !sos_hdr) 3176 return -EINVAL; 3177 3178 ucode_start_addr = (uint8_t *)sos_hdr + 3179 le32_to_cpu(desc->offset_bytes) + 3180 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3181 3182 switch (desc->fw_type) { 3183 case PSP_FW_TYPE_PSP_SOS: 3184 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3185 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3186 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3187 psp->sos.start_addr = ucode_start_addr; 3188 break; 3189 case PSP_FW_TYPE_PSP_SYS_DRV: 3190 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3191 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3192 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3193 psp->sys.start_addr = ucode_start_addr; 3194 break; 3195 case PSP_FW_TYPE_PSP_KDB: 3196 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3197 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3198 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3199 
psp->kdb.start_addr = ucode_start_addr; 3200 break; 3201 case PSP_FW_TYPE_PSP_TOC: 3202 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3203 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3204 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3205 psp->toc.start_addr = ucode_start_addr; 3206 break; 3207 case PSP_FW_TYPE_PSP_SPL: 3208 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3209 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3210 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3211 psp->spl.start_addr = ucode_start_addr; 3212 break; 3213 case PSP_FW_TYPE_PSP_RL: 3214 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3215 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3216 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3217 psp->rl.start_addr = ucode_start_addr; 3218 break; 3219 case PSP_FW_TYPE_PSP_SOC_DRV: 3220 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3221 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3222 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3223 psp->soc_drv.start_addr = ucode_start_addr; 3224 break; 3225 case PSP_FW_TYPE_PSP_INTF_DRV: 3226 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3227 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3228 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3229 psp->intf_drv.start_addr = ucode_start_addr; 3230 break; 3231 case PSP_FW_TYPE_PSP_DBG_DRV: 3232 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3233 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3234 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3235 psp->dbg_drv.start_addr = ucode_start_addr; 3236 break; 3237 case PSP_FW_TYPE_PSP_RAS_DRV: 3238 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3239 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3240 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3241 psp->ras_drv.start_addr = ucode_start_addr; 3242 break; 3243 default: 3244 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3245 break; 3246 } 3247 3248 return 0; 3249 } 3250 3251 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3252 { 3253 const struct psp_firmware_header_v1_0 *sos_hdr; 3254 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3255 uint8_t *ucode_array_start_addr; 3256 3257 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3258 ucode_array_start_addr = (uint8_t *)sos_hdr + 3259 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3260 3261 if (adev->gmc.xgmi.connected_to_cpu || 3262 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3263 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3264 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3265 3266 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3267 adev->psp.sys.start_addr = ucode_array_start_addr; 3268 3269 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3270 adev->psp.sos.start_addr = ucode_array_start_addr + 3271 le32_to_cpu(sos_hdr->sos.offset_bytes); 3272 } else { 3273 /* Load alternate PSP SOS FW */ 3274 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3275 3276 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3277 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3278 3279 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3280 
adev->psp.sys.start_addr = ucode_array_start_addr + 3281 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3282 3283 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3284 adev->psp.sos.start_addr = ucode_array_start_addr + 3285 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3286 } 3287 3288 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3289 dev_warn(adev->dev, "PSP SOS FW not available"); 3290 return -EINVAL; 3291 } 3292 3293 return 0; 3294 } 3295 3296 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3297 { 3298 struct amdgpu_device *adev = psp->adev; 3299 char fw_name[PSP_FW_NAME_LEN]; 3300 const struct psp_firmware_header_v1_0 *sos_hdr; 3301 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3302 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3303 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3304 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3305 int err = 0; 3306 uint8_t *ucode_array_start_addr; 3307 int fw_index = 0; 3308 3309 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 3310 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); 3311 if (err) 3312 goto out; 3313 3314 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3315 ucode_array_start_addr = (uint8_t *)sos_hdr + 3316 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3317 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3318 3319 switch (sos_hdr->header.header_version_major) { 3320 case 1: 3321 err = psp_init_sos_base_fw(adev); 3322 if (err) 3323 goto out; 3324 3325 if (sos_hdr->header.header_version_minor == 1) { 3326 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3327 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3328 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3329 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3330 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3331 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3332 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3333 } 3334 if (sos_hdr->header.header_version_minor == 2) { 3335 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3336 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3337 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3338 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3339 } 3340 if (sos_hdr->header.header_version_minor == 3) { 3341 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3342 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3343 adev->psp.toc.start_addr = ucode_array_start_addr + 3344 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3345 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3346 adev->psp.kdb.start_addr = ucode_array_start_addr + 3347 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3348 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3349 adev->psp.spl.start_addr = ucode_array_start_addr + 3350 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3351 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3352 adev->psp.rl.start_addr = ucode_array_start_addr + 3353 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3354 } 3355 break; 3356 case 2: 3357 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3358 3359 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= 
UCODE_MAX_PSP_PACKAGING) { 3360 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3361 err = -EINVAL; 3362 goto out; 3363 } 3364 3365 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { 3366 err = parse_sos_bin_descriptor(psp, 3367 &sos_hdr_v2_0->psp_fw_bin[fw_index], 3368 sos_hdr_v2_0); 3369 if (err) 3370 goto out; 3371 } 3372 break; 3373 default: 3374 dev_err(adev->dev, 3375 "unsupported psp sos firmware\n"); 3376 err = -EINVAL; 3377 goto out; 3378 } 3379 3380 return 0; 3381 out: 3382 amdgpu_ucode_release(&adev->psp.sos_fw); 3383 3384 return err; 3385 } 3386 3387 static int parse_ta_bin_descriptor(struct psp_context *psp, 3388 const struct psp_fw_bin_desc *desc, 3389 const struct ta_firmware_header_v2_0 *ta_hdr) 3390 { 3391 uint8_t *ucode_start_addr = NULL; 3392 3393 if (!psp || !desc || !ta_hdr) 3394 return -EINVAL; 3395 3396 ucode_start_addr = (uint8_t *)ta_hdr + 3397 le32_to_cpu(desc->offset_bytes) + 3398 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3399 3400 switch (desc->fw_type) { 3401 case TA_FW_TYPE_PSP_ASD: 3402 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3403 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3404 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3405 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3406 break; 3407 case TA_FW_TYPE_PSP_XGMI: 3408 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3409 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3410 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3411 break; 3412 case TA_FW_TYPE_PSP_RAS: 3413 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3414 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3415 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3416 break; 3417 case TA_FW_TYPE_PSP_HDCP: 3418 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3419 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3420 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3421 break; 3422 case TA_FW_TYPE_PSP_DTM: 3423 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3424 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3425 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3426 break; 3427 case TA_FW_TYPE_PSP_RAP: 3428 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3429 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3430 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3431 break; 3432 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3433 psp->securedisplay_context.context.bin_desc.fw_version = 3434 le32_to_cpu(desc->fw_version); 3435 psp->securedisplay_context.context.bin_desc.size_bytes = 3436 le32_to_cpu(desc->size_bytes); 3437 psp->securedisplay_context.context.bin_desc.start_addr = 3438 ucode_start_addr; 3439 break; 3440 default: 3441 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3442 break; 3443 } 3444 3445 return 0; 3446 } 3447 3448 static int parse_ta_v1_microcode(struct psp_context *psp) 3449 { 3450 const struct ta_firmware_header_v1_0 *ta_hdr; 3451 struct amdgpu_device *adev = psp->adev; 3452 3453 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3454 3455 if 
(le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3456 return -EINVAL; 3457 3458 adev->psp.xgmi_context.context.bin_desc.fw_version = 3459 le32_to_cpu(ta_hdr->xgmi.fw_version); 3460 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3461 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3462 adev->psp.xgmi_context.context.bin_desc.start_addr = 3463 (uint8_t *)ta_hdr + 3464 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3465 3466 adev->psp.ras_context.context.bin_desc.fw_version = 3467 le32_to_cpu(ta_hdr->ras.fw_version); 3468 adev->psp.ras_context.context.bin_desc.size_bytes = 3469 le32_to_cpu(ta_hdr->ras.size_bytes); 3470 adev->psp.ras_context.context.bin_desc.start_addr = 3471 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3472 le32_to_cpu(ta_hdr->ras.offset_bytes); 3473 3474 adev->psp.hdcp_context.context.bin_desc.fw_version = 3475 le32_to_cpu(ta_hdr->hdcp.fw_version); 3476 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3477 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3478 adev->psp.hdcp_context.context.bin_desc.start_addr = 3479 (uint8_t *)ta_hdr + 3480 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3481 3482 adev->psp.dtm_context.context.bin_desc.fw_version = 3483 le32_to_cpu(ta_hdr->dtm.fw_version); 3484 adev->psp.dtm_context.context.bin_desc.size_bytes = 3485 le32_to_cpu(ta_hdr->dtm.size_bytes); 3486 adev->psp.dtm_context.context.bin_desc.start_addr = 3487 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3488 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3489 3490 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3491 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3492 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3493 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3494 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3495 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3496 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3497 3498 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3499 3500 return 0; 3501 } 3502 3503 static int parse_ta_v2_microcode(struct psp_context *psp) 3504 { 3505 const struct ta_firmware_header_v2_0 *ta_hdr; 3506 struct amdgpu_device *adev = psp->adev; 3507 int err = 0; 3508 int ta_index = 0; 3509 3510 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3511 3512 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3513 return -EINVAL; 3514 3515 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3516 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3517 return -EINVAL; 3518 } 3519 3520 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3521 err = parse_ta_bin_descriptor(psp, 3522 &ta_hdr->ta_fw_bin[ta_index], 3523 ta_hdr); 3524 if (err) 3525 return err; 3526 } 3527 3528 return 0; 3529 } 3530 3531 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3532 { 3533 const struct common_firmware_header *hdr; 3534 struct amdgpu_device *adev = psp->adev; 3535 char fw_name[PSP_FW_NAME_LEN]; 3536 int err; 3537 3538 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 3539 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name); 3540 if (err) 3541 return err; 3542 3543 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3544 switch (le16_to_cpu(hdr->header_version_major)) { 3545 case 1: 3546 err = parse_ta_v1_microcode(psp); 3547 break; 3548 case 2: 3549 err = parse_ta_v2_microcode(psp); 3550 break; 3551 default: 3552 
dev_err(adev->dev, "unsupported TA header version\n"); 3553 err = -EINVAL; 3554 } 3555 3556 if (err) 3557 amdgpu_ucode_release(&adev->psp.ta_fw); 3558 3559 return err; 3560 } 3561 3562 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3563 { 3564 struct amdgpu_device *adev = psp->adev; 3565 char fw_name[PSP_FW_NAME_LEN]; 3566 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3567 struct amdgpu_firmware_info *info = NULL; 3568 int err = 0; 3569 3570 if (!amdgpu_sriov_vf(adev)) { 3571 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3572 return -EINVAL; 3573 } 3574 3575 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); 3576 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name); 3577 if (err) { 3578 if (err == -ENODEV) { 3579 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3580 err = 0; 3581 goto out; 3582 } 3583 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3584 } 3585 3586 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3587 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3588 info->fw = adev->psp.cap_fw; 3589 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3590 adev->psp.cap_fw->data; 3591 adev->firmware.fw_size += ALIGN( 3592 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3593 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3594 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3595 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3596 3597 return 0; 3598 3599 out: 3600 amdgpu_ucode_release(&adev->psp.cap_fw); 3601 return err; 3602 } 3603 3604 static int psp_set_clockgating_state(void *handle, 3605 enum amd_clockgating_state state) 3606 { 3607 return 0; 3608 } 3609 3610 static int psp_set_powergating_state(void *handle, 3611 enum amd_powergating_state state) 3612 { 3613 return 0; 3614 } 3615 3616 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3617 struct device_attribute *attr, 3618 char *buf) 3619 { 3620 struct drm_device *ddev = dev_get_drvdata(dev); 3621 struct amdgpu_device *adev = drm_to_adev(ddev); 3622 uint32_t fw_ver; 3623 int ret; 3624 3625 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3626 dev_info(adev->dev, "PSP block is not ready yet\n."); 3627 return -EBUSY; 3628 } 3629 3630 mutex_lock(&adev->psp.mutex); 3631 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3632 mutex_unlock(&adev->psp.mutex); 3633 3634 if (ret) { 3635 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); 3636 return ret; 3637 } 3638 3639 return sysfs_emit(buf, "%x\n", fw_ver); 3640 } 3641 3642 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3643 struct device_attribute *attr, 3644 const char *buf, 3645 size_t count) 3646 { 3647 struct drm_device *ddev = dev_get_drvdata(dev); 3648 struct amdgpu_device *adev = drm_to_adev(ddev); 3649 int ret, idx; 3650 char fw_name[100]; 3651 const struct firmware *usbc_pd_fw; 3652 struct amdgpu_bo *fw_buf_bo = NULL; 3653 uint64_t fw_pri_mc_addr; 3654 void *fw_pri_cpu_addr; 3655 3656 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3657 dev_err(adev->dev, "PSP block is not ready yet."); 3658 return -EBUSY; 3659 } 3660 3661 if (!drm_dev_enter(ddev, &idx)) 3662 return -ENODEV; 3663 3664 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 3665 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 3666 if (ret) 3667 goto fail; 3668 3669 /* LFB address which is aligned to 1MB boundary per 
PSP request */ 3670 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3671 AMDGPU_GEM_DOMAIN_VRAM | 3672 AMDGPU_GEM_DOMAIN_GTT, 3673 &fw_buf_bo, &fw_pri_mc_addr, 3674 &fw_pri_cpu_addr); 3675 if (ret) 3676 goto rel_buf; 3677 3678 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3679 3680 mutex_lock(&adev->psp.mutex); 3681 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3682 mutex_unlock(&adev->psp.mutex); 3683 3684 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3685 3686 rel_buf: 3687 release_firmware(usbc_pd_fw); 3688 fail: 3689 if (ret) { 3690 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); 3691 count = ret; 3692 } 3693 3694 drm_dev_exit(idx); 3695 return count; 3696 } 3697 3698 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3699 { 3700 int idx; 3701 3702 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3703 return; 3704 3705 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3706 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3707 3708 drm_dev_exit(idx); 3709 } 3710 3711 /** 3712 * DOC: usbc_pd_fw 3713 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 3714 * this file will trigger the update process. 3715 */ 3716 static DEVICE_ATTR(usbc_pd_fw, 0644, 3717 psp_usbc_pd_fw_sysfs_read, 3718 psp_usbc_pd_fw_sysfs_write); 3719 3720 int is_psp_fw_valid(struct psp_bin_desc bin) 3721 { 3722 return bin.size_bytes; 3723 } 3724 3725 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3726 struct bin_attribute *bin_attr, 3727 char *buffer, loff_t pos, size_t count) 3728 { 3729 struct device *dev = kobj_to_dev(kobj); 3730 struct drm_device *ddev = dev_get_drvdata(dev); 3731 struct amdgpu_device *adev = drm_to_adev(ddev); 3732 3733 adev->psp.vbflash_done = false; 3734 3735 /* Safeguard against memory drain */ 3736 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3737 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 3738 kvfree(adev->psp.vbflash_tmp_buf); 3739 adev->psp.vbflash_tmp_buf = NULL; 3740 adev->psp.vbflash_image_size = 0; 3741 return -ENOMEM; 3742 } 3743 3744 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3745 if (!adev->psp.vbflash_tmp_buf) { 3746 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3747 if (!adev->psp.vbflash_tmp_buf) 3748 return -ENOMEM; 3749 } 3750 3751 mutex_lock(&adev->psp.mutex); 3752 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 3753 adev->psp.vbflash_image_size += count; 3754 mutex_unlock(&adev->psp.mutex); 3755 3756 dev_dbg(adev->dev, "IFWI staged for update\n"); 3757 3758 return count; 3759 } 3760 3761 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 3762 struct bin_attribute *bin_attr, char *buffer, 3763 loff_t pos, size_t count) 3764 { 3765 struct device *dev = kobj_to_dev(kobj); 3766 struct drm_device *ddev = dev_get_drvdata(dev); 3767 struct amdgpu_device *adev = drm_to_adev(ddev); 3768 struct amdgpu_bo *fw_buf_bo = NULL; 3769 uint64_t fw_pri_mc_addr; 3770 void *fw_pri_cpu_addr; 3771 int ret; 3772 3773 if (adev->psp.vbflash_image_size == 0) 3774 return -EINVAL; 3775 3776 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); 3777 3778 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 3779 AMDGPU_GPU_PAGE_SIZE, 3780 AMDGPU_GEM_DOMAIN_VRAM, 3781 &fw_buf_bo, 3782 &fw_pri_mc_addr, 3783 &fw_pri_cpu_addr); 3784 if (ret) 3785 goto rel_buf; 3786 3787 
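	/* Reading psp_vbflash triggers the actual update: copy the staged
	 * IFWI image into the VRAM bounce buffer allocated above and pass
	 * its MC address to the PSP SPIROM update command.
	 */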
	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);

static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}
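/*
 * Like amdgpu_flash_attr_is_visible() above, the binary-attribute counterpart
 * below exposes psp_vbflash only when IFWI update is supported (sup_ifwi_up);
 * returning 0 from either callback keeps the corresponding sysfs file from
 * being created at all.
 */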
static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};
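/*
 * A minimal sketch of how one of the version structs above is consumed when a
 * SOC file assembles its IP block list (illustrative, not taken from this
 * file):
 *
 *   amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
 *
 * The hooks in psp_ip_funcs are then driven by the common amdgpu IP block
 * state machine (early_init, sw_init, hw_init, suspend/resume, ...).
 */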