/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B	(1024*1024*3)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Since DF C-state management is centralized in the PMFW, the firmware
 * loading sequence is updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	psp->autoload_supported = true;
	psp->boot_time_tmr = true;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 2):
		psp->boot_time_tmr = false;
		fallthrough;
	case IP_VERSION(13, 0, 6):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		psp_v13_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
		}
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
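		/* IFWI updates through PSP are only supported on bare metal,
		 * not under SRIOV.
		 */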
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		psp_v14_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	adev->psp_timeout = 20000;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query a psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is invalid,
 * or true if the specific database entry is found, and copy it to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up the requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		return -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(cmd);
	cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	default:
		return "UNKNOWN CMD";
	}
}

static bool psp_err_warn(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;

	/* This response indicates reg list is already loaded */
	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
	    cmd->resp.status == TEE_ERROR_CANCEL)
		return false;

	return true;
}

static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = psp->adev->psp_timeout;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because the gpu reset thread is triggered and the lock
		 * resource should be released for the psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, the psp response status is not 0 even when there is
	 * no problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So only print a warning instead of an error during psp
	 * initialization to avoid breaking hw_init, and don't return
	 * -EINVAL in that case.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		if (psp_err_warn(psp))
			dev_warn(
				psp->adev->dev,
				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
				psp->cmd_buf_mem->cmd_id,
				psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the TMR size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to HW engineers, the TMR address should be "naturally
	 * aligned", i.e. the start address should be an integer multiple of
	 * the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver is
	 * unloaded.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo && !psp->boot_time_tmr) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * It is already set up by the host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will
	 * fail; add a workaround to bypass it for SRIOV for now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for the XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
		amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
			IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since the
			 * reflection criterion is based on num_hops (direct or
			 * indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}
	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
			psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required per the xgmi ta implementation of GET_PEER_LINKS.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return 0;
}

int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);

	psp->ras_context.context.initialized = false;

	return ret;
}

int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
dev_warn(adev->dev, "PSP get boot config failed\n"); 1694 1695 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { 1696 if (!boot_cfg) { 1697 dev_info(adev->dev, "GECC is disabled\n"); 1698 } else { 1699 /* disable GECC in next boot cycle if ras is 1700 * disabled by module parameter amdgpu_ras_enable 1701 * and/or amdgpu_ras_mask, or boot_config_get call 1702 * is failed 1703 */ 1704 ret = psp_boot_config_set(adev, 0); 1705 if (ret) 1706 dev_warn(adev->dev, "PSP set boot config failed\n"); 1707 else 1708 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1709 } 1710 } else { 1711 if (boot_cfg == 1) { 1712 dev_info(adev->dev, "GECC is enabled\n"); 1713 } else { 1714 /* enable GECC in next boot cycle if it is disabled 1715 * in boot config, or force enable GECC if failed to 1716 * get boot configuration 1717 */ 1718 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1719 if (ret) 1720 dev_warn(adev->dev, "PSP set boot config failed\n"); 1721 else 1722 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1723 } 1724 } 1725 } 1726 1727 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1728 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1729 1730 if (!psp->ras_context.context.mem_context.shared_buf) { 1731 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1732 if (ret) 1733 return ret; 1734 } 1735 1736 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1737 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1738 1739 if (amdgpu_ras_is_poison_mode_supported(adev)) 1740 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1741 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1742 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1743 ras_cmd->ras_in_message.init_flags.xcc_mask = 1744 adev->gfx.xcc_mask; 1745 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1746 1747 ret = psp_ta_load(psp, &psp->ras_context.context); 1748 1749 if (!ret && !ras_cmd->ras_status) 1750 psp->ras_context.context.initialized = true; 1751 else { 1752 if (ras_cmd->ras_status) 1753 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1754 1755 /* fail to load RAS TA */ 1756 psp->ras_context.context.initialized = false; 1757 } 1758 1759 return ret; 1760 } 1761 1762 int psp_ras_trigger_error(struct psp_context *psp, 1763 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 1764 { 1765 struct ta_ras_shared_memory *ras_cmd; 1766 struct amdgpu_device *adev = psp->adev; 1767 int ret; 1768 uint32_t dev_mask; 1769 1770 if (!psp->ras_context.context.initialized) 1771 return -EINVAL; 1772 1773 switch (info->block_id) { 1774 case TA_RAS_BLOCK__GFX: 1775 dev_mask = GET_MASK(GC, instance_mask); 1776 break; 1777 case TA_RAS_BLOCK__SDMA: 1778 dev_mask = GET_MASK(SDMA0, instance_mask); 1779 break; 1780 case TA_RAS_BLOCK__VCN: 1781 case TA_RAS_BLOCK__JPEG: 1782 dev_mask = GET_MASK(VCN, instance_mask); 1783 break; 1784 default: 1785 dev_mask = instance_mask; 1786 break; 1787 } 1788 1789 /* reuse sub_block_index for backward compatibility */ 1790 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 1791 dev_mask &= AMDGPU_RAS_INST_MASK; 1792 info->sub_block_index |= dev_mask; 1793 1794 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1795 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1796 1797 ras_cmd->cmd_id = 
	ras_cmd->ras_in_message.trigger_error = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs, the error injection was successful,
	 * but the return status from the TA is no longer reliable
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_cmd->ras_status)
		return -EINVAL;

	return 0;
}

int psp_ras_query_address(struct psp_context *psp,
			  struct ta_ras_query_address_input *addr_in,
			  struct ta_ras_query_address_output *addr_out)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
	ras_cmd->ras_in_message.address = *addr_in;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
		return -EINVAL;

	*addr_out = ras_cmd->ras_out_message.address;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass hdcp initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
	    !psp->hdcp_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->hdcp_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->hdcp_context.context);
	if (!ret) {
		psp->hdcp_context.context.initialized = true;
		mutex_init(&psp->hdcp_context.mutex);
	}

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}

static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->hdcp_context.context);

	psp->hdcp_context.context.initialized = false;

	return ret;
}
// HDCP end

// DTM start
static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass dtm initialization if dmu is harvested */
initialization if dmu is harvested */ 1930 if (!amdgpu_device_has_display_hardware(psp->adev)) 1931 return 0; 1932 1933 if (!psp->dtm_context.context.bin_desc.size_bytes || 1934 !psp->dtm_context.context.bin_desc.start_addr) { 1935 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1936 return 0; 1937 } 1938 1939 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 1940 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1941 1942 if (!psp->dtm_context.context.mem_context.shared_buf) { 1943 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 1944 if (ret) 1945 return ret; 1946 } 1947 1948 ret = psp_ta_load(psp, &psp->dtm_context.context); 1949 if (!ret) { 1950 psp->dtm_context.context.initialized = true; 1951 mutex_init(&psp->dtm_context.mutex); 1952 } 1953 1954 return ret; 1955 } 1956 1957 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1958 { 1959 /* 1960 * TODO: bypass the loading in sriov for now 1961 */ 1962 if (amdgpu_sriov_vf(psp->adev)) 1963 return 0; 1964 1965 if (!psp->dtm_context.context.initialized) 1966 return 0; 1967 1968 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 1969 } 1970 1971 static int psp_dtm_terminate(struct psp_context *psp) 1972 { 1973 int ret; 1974 1975 /* 1976 * TODO: bypass the terminate in sriov for now 1977 */ 1978 if (amdgpu_sriov_vf(psp->adev)) 1979 return 0; 1980 1981 if (!psp->dtm_context.context.initialized) 1982 return 0; 1983 1984 ret = psp_ta_unload(psp, &psp->dtm_context.context); 1985 1986 psp->dtm_context.context.initialized = false; 1987 1988 return ret; 1989 } 1990 // DTM end 1991 1992 // RAP start 1993 static int psp_rap_initialize(struct psp_context *psp) 1994 { 1995 int ret; 1996 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 1997 1998 /* 1999 * TODO: bypass the initialize in sriov for now 2000 */ 2001 if (amdgpu_sriov_vf(psp->adev)) 2002 return 0; 2003 2004 if (!psp->rap_context.context.bin_desc.size_bytes || 2005 !psp->rap_context.context.bin_desc.start_addr) { 2006 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2007 return 0; 2008 } 2009 2010 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2011 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2012 2013 if (!psp->rap_context.context.mem_context.shared_buf) { 2014 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2015 if (ret) 2016 return ret; 2017 } 2018 2019 ret = psp_ta_load(psp, &psp->rap_context.context); 2020 if (!ret) { 2021 psp->rap_context.context.initialized = true; 2022 mutex_init(&psp->rap_context.mutex); 2023 } else 2024 return ret; 2025 2026 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2027 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2028 psp_rap_terminate(psp); 2029 /* free rap shared memory */ 2030 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2031 2032 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2033 ret, status); 2034 2035 return ret; 2036 } 2037 2038 return 0; 2039 } 2040 2041 static int psp_rap_terminate(struct psp_context *psp) 2042 { 2043 int ret; 2044 2045 if (!psp->rap_context.context.initialized) 2046 return 0; 2047 2048 ret = psp_ta_unload(psp, &psp->rap_context.context); 2049 2050 psp->rap_context.context.initialized = false; 2051 2052 return ret; 2053 } 2054 2055 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2056 { 2057 struct ta_rap_shared_memory 
*rap_cmd; 2058 int ret = 0; 2059 2060 if (!psp->rap_context.context.initialized) 2061 return 0; 2062 2063 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2064 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2065 return -EINVAL; 2066 2067 mutex_lock(&psp->rap_context.mutex); 2068 2069 rap_cmd = (struct ta_rap_shared_memory *) 2070 psp->rap_context.context.mem_context.shared_buf; 2071 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2072 2073 rap_cmd->cmd_id = ta_cmd_id; 2074 rap_cmd->validation_method_id = METHOD_A; 2075 2076 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2077 if (ret) 2078 goto out_unlock; 2079 2080 if (status) 2081 *status = rap_cmd->rap_status; 2082 2083 out_unlock: 2084 mutex_unlock(&psp->rap_context.mutex); 2085 2086 return ret; 2087 } 2088 // RAP end 2089 2090 /* securedisplay start */ 2091 static int psp_securedisplay_initialize(struct psp_context *psp) 2092 { 2093 int ret; 2094 struct ta_securedisplay_cmd *securedisplay_cmd; 2095 2096 /* 2097 * TODO: bypass the initialize in sriov for now 2098 */ 2099 if (amdgpu_sriov_vf(psp->adev)) 2100 return 0; 2101 2102 /* bypass securedisplay initialization if dmu is harvested */ 2103 if (!amdgpu_device_has_display_hardware(psp->adev)) 2104 return 0; 2105 2106 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2107 !psp->securedisplay_context.context.bin_desc.start_addr) { 2108 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2109 return 0; 2110 } 2111 2112 psp->securedisplay_context.context.mem_context.shared_mem_size = 2113 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2114 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2115 2116 if (!psp->securedisplay_context.context.initialized) { 2117 ret = psp_ta_init_shared_buf(psp, 2118 &psp->securedisplay_context.context.mem_context); 2119 if (ret) 2120 return ret; 2121 } 2122 2123 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2124 if (!ret) { 2125 psp->securedisplay_context.context.initialized = true; 2126 mutex_init(&psp->securedisplay_context.mutex); 2127 } else 2128 return ret; 2129 2130 mutex_lock(&psp->securedisplay_context.mutex); 2131 2132 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2133 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2134 2135 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2136 2137 mutex_unlock(&psp->securedisplay_context.mutex); 2138 2139 if (ret) { 2140 psp_securedisplay_terminate(psp); 2141 /* free securedisplay shared memory */ 2142 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2143 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2144 return -EINVAL; 2145 } 2146 2147 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2148 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2149 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. 
ret 0x%x\n", 2150 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2151 /* don't try again */ 2152 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2153 } 2154 2155 return 0; 2156 } 2157 2158 static int psp_securedisplay_terminate(struct psp_context *psp) 2159 { 2160 int ret; 2161 2162 /* 2163 * TODO:bypass the terminate in sriov for now 2164 */ 2165 if (amdgpu_sriov_vf(psp->adev)) 2166 return 0; 2167 2168 if (!psp->securedisplay_context.context.initialized) 2169 return 0; 2170 2171 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2172 2173 psp->securedisplay_context.context.initialized = false; 2174 2175 return ret; 2176 } 2177 2178 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2179 { 2180 int ret; 2181 2182 if (!psp->securedisplay_context.context.initialized) 2183 return -EINVAL; 2184 2185 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2186 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) 2187 return -EINVAL; 2188 2189 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2190 2191 return ret; 2192 } 2193 /* SECUREDISPLAY end */ 2194 2195 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2196 { 2197 struct psp_context *psp = &adev->psp; 2198 int ret = 0; 2199 2200 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2201 ret = psp->funcs->wait_for_bootloader(psp); 2202 2203 return ret; 2204 } 2205 2206 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2207 { 2208 if (psp->funcs && 2209 psp->funcs->get_ras_capability) { 2210 return psp->funcs->get_ras_capability(psp); 2211 } else { 2212 return false; 2213 } 2214 } 2215 2216 static int psp_hw_start(struct psp_context *psp) 2217 { 2218 struct amdgpu_device *adev = psp->adev; 2219 int ret; 2220 2221 if (!amdgpu_sriov_vf(adev)) { 2222 if ((is_psp_fw_valid(psp->kdb)) && 2223 (psp->funcs->bootloader_load_kdb != NULL)) { 2224 ret = psp_bootloader_load_kdb(psp); 2225 if (ret) { 2226 dev_err(adev->dev, "PSP load kdb failed!\n"); 2227 return ret; 2228 } 2229 } 2230 2231 if ((is_psp_fw_valid(psp->spl)) && 2232 (psp->funcs->bootloader_load_spl != NULL)) { 2233 ret = psp_bootloader_load_spl(psp); 2234 if (ret) { 2235 dev_err(adev->dev, "PSP load spl failed!\n"); 2236 return ret; 2237 } 2238 } 2239 2240 if ((is_psp_fw_valid(psp->sys)) && 2241 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2242 ret = psp_bootloader_load_sysdrv(psp); 2243 if (ret) { 2244 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2245 return ret; 2246 } 2247 } 2248 2249 if ((is_psp_fw_valid(psp->soc_drv)) && 2250 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2251 ret = psp_bootloader_load_soc_drv(psp); 2252 if (ret) { 2253 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2254 return ret; 2255 } 2256 } 2257 2258 if ((is_psp_fw_valid(psp->intf_drv)) && 2259 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2260 ret = psp_bootloader_load_intf_drv(psp); 2261 if (ret) { 2262 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2263 return ret; 2264 } 2265 } 2266 2267 if ((is_psp_fw_valid(psp->dbg_drv)) && 2268 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2269 ret = psp_bootloader_load_dbg_drv(psp); 2270 if (ret) { 2271 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2272 return ret; 2273 } 2274 } 2275 2276 if ((is_psp_fw_valid(psp->ras_drv)) && 2277 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2278 ret = psp_bootloader_load_ras_drv(psp); 2279 if (ret) { 2280 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2281 
return ret; 2282 } 2283 } 2284 2285 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2286 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2287 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2288 if (ret) { 2289 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2290 return ret; 2291 } 2292 } 2293 2294 if ((is_psp_fw_valid(psp->sos)) && 2295 (psp->funcs->bootloader_load_sos != NULL)) { 2296 ret = psp_bootloader_load_sos(psp); 2297 if (ret) { 2298 dev_err(adev->dev, "PSP load sos failed!\n"); 2299 return ret; 2300 } 2301 } 2302 } 2303 2304 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2305 if (ret) { 2306 dev_err(adev->dev, "PSP create ring failed!\n"); 2307 return ret; 2308 } 2309 2310 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2311 goto skip_pin_bo; 2312 2313 if (!psp->boot_time_tmr || psp->autoload_supported) { 2314 ret = psp_tmr_init(psp); 2315 if (ret) { 2316 dev_err(adev->dev, "PSP tmr init failed!\n"); 2317 return ret; 2318 } 2319 } 2320 2321 skip_pin_bo: 2322 /* 2323 * For ASICs with DF Cstate management centralized 2324 * to PMFW, TMR setup should be performed after PMFW 2325 * loaded and before other non-psp firmware loaded. 2326 */ 2327 if (psp->pmfw_centralized_cstate_management) { 2328 ret = psp_load_smu_fw(psp); 2329 if (ret) 2330 return ret; 2331 } 2332 2333 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2334 ret = psp_tmr_load(psp); 2335 if (ret) { 2336 dev_err(adev->dev, "PSP load tmr failed!\n"); 2337 return ret; 2338 } 2339 } 2340 2341 return 0; 2342 } 2343 2344 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2345 enum psp_gfx_fw_type *type) 2346 { 2347 switch (ucode->ucode_id) { 2348 case AMDGPU_UCODE_ID_CAP: 2349 *type = GFX_FW_TYPE_CAP; 2350 break; 2351 case AMDGPU_UCODE_ID_SDMA0: 2352 *type = GFX_FW_TYPE_SDMA0; 2353 break; 2354 case AMDGPU_UCODE_ID_SDMA1: 2355 *type = GFX_FW_TYPE_SDMA1; 2356 break; 2357 case AMDGPU_UCODE_ID_SDMA2: 2358 *type = GFX_FW_TYPE_SDMA2; 2359 break; 2360 case AMDGPU_UCODE_ID_SDMA3: 2361 *type = GFX_FW_TYPE_SDMA3; 2362 break; 2363 case AMDGPU_UCODE_ID_SDMA4: 2364 *type = GFX_FW_TYPE_SDMA4; 2365 break; 2366 case AMDGPU_UCODE_ID_SDMA5: 2367 *type = GFX_FW_TYPE_SDMA5; 2368 break; 2369 case AMDGPU_UCODE_ID_SDMA6: 2370 *type = GFX_FW_TYPE_SDMA6; 2371 break; 2372 case AMDGPU_UCODE_ID_SDMA7: 2373 *type = GFX_FW_TYPE_SDMA7; 2374 break; 2375 case AMDGPU_UCODE_ID_CP_MES: 2376 *type = GFX_FW_TYPE_CP_MES; 2377 break; 2378 case AMDGPU_UCODE_ID_CP_MES_DATA: 2379 *type = GFX_FW_TYPE_MES_STACK; 2380 break; 2381 case AMDGPU_UCODE_ID_CP_MES1: 2382 *type = GFX_FW_TYPE_CP_MES_KIQ; 2383 break; 2384 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2385 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2386 break; 2387 case AMDGPU_UCODE_ID_CP_CE: 2388 *type = GFX_FW_TYPE_CP_CE; 2389 break; 2390 case AMDGPU_UCODE_ID_CP_PFP: 2391 *type = GFX_FW_TYPE_CP_PFP; 2392 break; 2393 case AMDGPU_UCODE_ID_CP_ME: 2394 *type = GFX_FW_TYPE_CP_ME; 2395 break; 2396 case AMDGPU_UCODE_ID_CP_MEC1: 2397 *type = GFX_FW_TYPE_CP_MEC; 2398 break; 2399 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2400 *type = GFX_FW_TYPE_CP_MEC_ME1; 2401 break; 2402 case AMDGPU_UCODE_ID_CP_MEC2: 2403 *type = GFX_FW_TYPE_CP_MEC; 2404 break; 2405 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2406 *type = GFX_FW_TYPE_CP_MEC_ME2; 2407 break; 2408 case AMDGPU_UCODE_ID_RLC_P: 2409 *type = GFX_FW_TYPE_RLC_P; 2410 break; 2411 case AMDGPU_UCODE_ID_RLC_V: 2412 *type = GFX_FW_TYPE_RLC_V; 2413 break; 2414 case AMDGPU_UCODE_ID_RLC_G: 2415 *type = GFX_FW_TYPE_RLC_G; 2416 break; 2417 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2418 *type = 
GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2419 break; 2420 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2421 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2422 break; 2423 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2424 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2425 break; 2426 case AMDGPU_UCODE_ID_RLC_IRAM: 2427 *type = GFX_FW_TYPE_RLC_IRAM; 2428 break; 2429 case AMDGPU_UCODE_ID_RLC_DRAM: 2430 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2431 break; 2432 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2433 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2434 break; 2435 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2436 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2437 break; 2438 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2439 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2440 break; 2441 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2442 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2443 break; 2444 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2445 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2446 break; 2447 case AMDGPU_UCODE_ID_SMC: 2448 *type = GFX_FW_TYPE_SMU; 2449 break; 2450 case AMDGPU_UCODE_ID_PPTABLE: 2451 *type = GFX_FW_TYPE_PPTABLE; 2452 break; 2453 case AMDGPU_UCODE_ID_UVD: 2454 *type = GFX_FW_TYPE_UVD; 2455 break; 2456 case AMDGPU_UCODE_ID_UVD1: 2457 *type = GFX_FW_TYPE_UVD1; 2458 break; 2459 case AMDGPU_UCODE_ID_VCE: 2460 *type = GFX_FW_TYPE_VCE; 2461 break; 2462 case AMDGPU_UCODE_ID_VCN: 2463 *type = GFX_FW_TYPE_VCN; 2464 break; 2465 case AMDGPU_UCODE_ID_VCN1: 2466 *type = GFX_FW_TYPE_VCN1; 2467 break; 2468 case AMDGPU_UCODE_ID_DMCU_ERAM: 2469 *type = GFX_FW_TYPE_DMCU_ERAM; 2470 break; 2471 case AMDGPU_UCODE_ID_DMCU_INTV: 2472 *type = GFX_FW_TYPE_DMCU_ISR; 2473 break; 2474 case AMDGPU_UCODE_ID_VCN0_RAM: 2475 *type = GFX_FW_TYPE_VCN0_RAM; 2476 break; 2477 case AMDGPU_UCODE_ID_VCN1_RAM: 2478 *type = GFX_FW_TYPE_VCN1_RAM; 2479 break; 2480 case AMDGPU_UCODE_ID_DMCUB: 2481 *type = GFX_FW_TYPE_DMUB; 2482 break; 2483 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2484 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2485 break; 2486 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2487 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2488 break; 2489 case AMDGPU_UCODE_ID_IMU_I: 2490 *type = GFX_FW_TYPE_IMU_I; 2491 break; 2492 case AMDGPU_UCODE_ID_IMU_D: 2493 *type = GFX_FW_TYPE_IMU_D; 2494 break; 2495 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2496 *type = GFX_FW_TYPE_RS64_PFP; 2497 break; 2498 case AMDGPU_UCODE_ID_CP_RS64_ME: 2499 *type = GFX_FW_TYPE_RS64_ME; 2500 break; 2501 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2502 *type = GFX_FW_TYPE_RS64_MEC; 2503 break; 2504 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2505 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2506 break; 2507 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2508 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2509 break; 2510 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2511 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2512 break; 2513 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2514 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2515 break; 2516 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2517 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2518 break; 2519 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2520 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2521 break; 2522 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2523 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2524 break; 2525 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2526 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2527 break; 2528 case AMDGPU_UCODE_ID_VPE_CTX: 2529 *type = GFX_FW_TYPE_VPEC_FW1; 2530 break; 2531 case AMDGPU_UCODE_ID_VPE_CTL: 2532 *type = GFX_FW_TYPE_VPEC_FW2; 2533 break; 2534 case AMDGPU_UCODE_ID_VPE: 2535 *type = GFX_FW_TYPE_VPE; 2536 break; 2537 case 
AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2538 *type = GFX_FW_TYPE_UMSCH_UCODE; 2539 break; 2540 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2541 *type = GFX_FW_TYPE_UMSCH_DATA; 2542 break; 2543 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2544 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2545 break; 2546 case AMDGPU_UCODE_ID_P2S_TABLE: 2547 *type = GFX_FW_TYPE_P2S_TABLE; 2548 break; 2549 case AMDGPU_UCODE_ID_JPEG_RAM: 2550 *type = GFX_FW_TYPE_JPEG_RAM; 2551 break; 2552 case AMDGPU_UCODE_ID_MAXIMUM: 2553 default: 2554 return -EINVAL; 2555 } 2556 2557 return 0; 2558 } 2559 2560 static void psp_print_fw_hdr(struct psp_context *psp, 2561 struct amdgpu_firmware_info *ucode) 2562 { 2563 struct amdgpu_device *adev = psp->adev; 2564 struct common_firmware_header *hdr; 2565 2566 switch (ucode->ucode_id) { 2567 case AMDGPU_UCODE_ID_SDMA0: 2568 case AMDGPU_UCODE_ID_SDMA1: 2569 case AMDGPU_UCODE_ID_SDMA2: 2570 case AMDGPU_UCODE_ID_SDMA3: 2571 case AMDGPU_UCODE_ID_SDMA4: 2572 case AMDGPU_UCODE_ID_SDMA5: 2573 case AMDGPU_UCODE_ID_SDMA6: 2574 case AMDGPU_UCODE_ID_SDMA7: 2575 hdr = (struct common_firmware_header *) 2576 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2577 amdgpu_ucode_print_sdma_hdr(hdr); 2578 break; 2579 case AMDGPU_UCODE_ID_CP_CE: 2580 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2581 amdgpu_ucode_print_gfx_hdr(hdr); 2582 break; 2583 case AMDGPU_UCODE_ID_CP_PFP: 2584 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2585 amdgpu_ucode_print_gfx_hdr(hdr); 2586 break; 2587 case AMDGPU_UCODE_ID_CP_ME: 2588 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2589 amdgpu_ucode_print_gfx_hdr(hdr); 2590 break; 2591 case AMDGPU_UCODE_ID_CP_MEC1: 2592 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2593 amdgpu_ucode_print_gfx_hdr(hdr); 2594 break; 2595 case AMDGPU_UCODE_ID_RLC_G: 2596 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2597 amdgpu_ucode_print_rlc_hdr(hdr); 2598 break; 2599 case AMDGPU_UCODE_ID_SMC: 2600 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2601 amdgpu_ucode_print_smc_hdr(hdr); 2602 break; 2603 default: 2604 break; 2605 } 2606 } 2607 2608 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2609 struct amdgpu_firmware_info *ucode, 2610 struct psp_gfx_cmd_resp *cmd) 2611 { 2612 int ret; 2613 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2614 2615 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2616 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2617 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2618 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2619 2620 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2621 if (ret) 2622 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2623 2624 return ret; 2625 } 2626 2627 int psp_execute_ip_fw_load(struct psp_context *psp, 2628 struct amdgpu_firmware_info *ucode) 2629 { 2630 int ret = 0; 2631 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2632 2633 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2634 if (!ret) { 2635 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2636 psp->fence_buf_mc_addr); 2637 } 2638 2639 release_psp_cmd_buf(psp); 2640 2641 return ret; 2642 } 2643 2644 static int psp_load_p2s_table(struct psp_context *psp) 2645 { 2646 int ret; 2647 struct amdgpu_device *adev = psp->adev; 2648 struct amdgpu_firmware_info *ucode = 2649 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2650 2651 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2652 (adev->pm.rpm_mode 
== AMDGPU_RUNPM_BAMACO))) 2653 return 0; 2654 2655 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { 2656 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2657 0x0036003C; 2658 if (psp->sos.fw_version < supp_vers) 2659 return 0; 2660 } 2661 2662 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2663 return 0; 2664 2665 ret = psp_execute_ip_fw_load(psp, ucode); 2666 2667 return ret; 2668 } 2669 2670 static int psp_load_smu_fw(struct psp_context *psp) 2671 { 2672 int ret; 2673 struct amdgpu_device *adev = psp->adev; 2674 struct amdgpu_firmware_info *ucode = 2675 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2676 struct amdgpu_ras *ras = psp->ras_context.ras; 2677 2678 /* 2679 * Skip SMU FW reloading in case of using BACO for runpm only, 2680 * as SMU is always alive. 2681 */ 2682 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2683 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2684 return 0; 2685 2686 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2687 return 0; 2688 2689 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2690 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2691 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2692 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2693 if (ret) 2694 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2695 } 2696 2697 ret = psp_execute_ip_fw_load(psp, ucode); 2698 2699 if (ret) 2700 dev_err(adev->dev, "PSP load smu failed!\n"); 2701 2702 return ret; 2703 } 2704 2705 static bool fw_load_skip_check(struct psp_context *psp, 2706 struct amdgpu_firmware_info *ucode) 2707 { 2708 if (!ucode->fw || !ucode->ucode_size) 2709 return true; 2710 2711 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2712 return true; 2713 2714 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2715 (psp_smu_reload_quirk(psp) || 2716 psp->autoload_supported || 2717 psp->pmfw_centralized_cstate_management)) 2718 return true; 2719 2720 if (amdgpu_sriov_vf(psp->adev) && 2721 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2722 return true; 2723 2724 if (psp->autoload_supported && 2725 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2726 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2727 /* skip mec JT when autoload is enabled */ 2728 return true; 2729 2730 return false; 2731 } 2732 2733 int psp_load_fw_list(struct psp_context *psp, 2734 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2735 { 2736 int ret = 0, i; 2737 struct amdgpu_firmware_info *ucode; 2738 2739 for (i = 0; i < ucode_count; ++i) { 2740 ucode = ucode_list[i]; 2741 psp_print_fw_hdr(psp, ucode); 2742 ret = psp_execute_ip_fw_load(psp, ucode); 2743 if (ret) 2744 return ret; 2745 } 2746 return ret; 2747 } 2748 2749 static int psp_load_non_psp_fw(struct psp_context *psp) 2750 { 2751 int i, ret; 2752 struct amdgpu_firmware_info *ucode; 2753 struct amdgpu_device *adev = psp->adev; 2754 2755 if (psp->autoload_supported && 2756 !psp->pmfw_centralized_cstate_management) { 2757 ret = psp_load_smu_fw(psp); 2758 if (ret) 2759 return ret; 2760 } 2761 2762 /* Load P2S table first if it's available */ 2763 psp_load_p2s_table(psp); 2764 2765 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2766 ucode = &adev->firmware.ucode[i]; 2767 2768 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2769 !fw_load_skip_check(psp, ucode)) { 2770 ret = psp_load_smu_fw(psp); 2771 if (ret) 2772 return ret; 2773 continue; 2774 } 2775 2776 if (fw_load_skip_check(psp, ucode)) 2777 continue; 2778 2779 if 
(psp->autoload_supported && 2780 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2781 IP_VERSION(11, 0, 7) || 2782 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2783 IP_VERSION(11, 0, 11) || 2784 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2785 IP_VERSION(11, 0, 12)) && 2786 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2787 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2788 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2789 /* PSP only receives one SDMA fw for sienna_cichlid, 2790 * as all four sdma fw are the same 2791 */ 2792 continue; 2793 2794 psp_print_fw_hdr(psp, ucode); 2795 2796 ret = psp_execute_ip_fw_load(psp, ucode); 2797 if (ret) 2798 return ret; 2799 2800 /* Start rlc autoload after psp received all the gfx firmware */ 2801 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 2802 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 2803 ret = psp_rlc_autoload_start(psp); 2804 if (ret) { 2805 dev_err(adev->dev, "Failed to start rlc autoload\n"); 2806 return ret; 2807 } 2808 } 2809 } 2810 2811 return 0; 2812 } 2813 2814 static int psp_load_fw(struct amdgpu_device *adev) 2815 { 2816 int ret; 2817 struct psp_context *psp = &adev->psp; 2818 2819 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2820 /* should not destroy ring, only stop */ 2821 psp_ring_stop(psp, PSP_RING_TYPE__KM); 2822 } else { 2823 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2824 2825 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2826 if (ret) { 2827 dev_err(adev->dev, "PSP ring init failed!\n"); 2828 goto failed; 2829 } 2830 } 2831 2832 ret = psp_hw_start(psp); 2833 if (ret) 2834 goto failed; 2835 2836 ret = psp_load_non_psp_fw(psp); 2837 if (ret) 2838 goto failed1; 2839 2840 ret = psp_asd_initialize(psp); 2841 if (ret) { 2842 dev_err(adev->dev, "PSP load asd failed!\n"); 2843 goto failed1; 2844 } 2845 2846 ret = psp_rl_load(adev); 2847 if (ret) { 2848 dev_err(adev->dev, "PSP load RL failed!\n"); 2849 goto failed1; 2850 } 2851 2852 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2853 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2854 ret = psp_xgmi_initialize(psp, false, true); 2855 /* Warn about XGMI session initialization failure 2856 * instead of stopping driver initialization 2857 */ 2858 if (ret) 2859 dev_err(psp->adev->dev, 2860 "XGMI: Failed to initialize XGMI session\n"); 2861 } 2862 } 2863 2864 if (psp->ta_fw) { 2865 ret = psp_ras_initialize(psp); 2866 if (ret) 2867 dev_err(psp->adev->dev, 2868 "RAS: Failed to initialize RAS\n"); 2869 2870 ret = psp_hdcp_initialize(psp); 2871 if (ret) 2872 dev_err(psp->adev->dev, 2873 "HDCP: Failed to initialize HDCP\n"); 2874 2875 ret = psp_dtm_initialize(psp); 2876 if (ret) 2877 dev_err(psp->adev->dev, 2878 "DTM: Failed to initialize DTM\n"); 2879 2880 ret = psp_rap_initialize(psp); 2881 if (ret) 2882 dev_err(psp->adev->dev, 2883 "RAP: Failed to initialize RAP\n"); 2884 2885 ret = psp_securedisplay_initialize(psp); 2886 if (ret) 2887 dev_err(psp->adev->dev, 2888 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 2889 } 2890 2891 return 0; 2892 2893 failed1: 2894 psp_free_shared_bufs(psp); 2895 failed: 2896 /* 2897 * all cleanup jobs (xgmi terminate, ras terminate, 2898 * ring destroy, cmd/fence/fw buffers destroy, 2899 * psp->cmd destroy) are delayed to psp_hw_fini 2900 */ 2901 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2902 return ret; 2903 } 2904 2905 static int psp_hw_init(void *handle) 2906 { 2907 int ret; 2908 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2909 2910 mutex_lock(&adev->firmware.mutex); 2911 /* 2912 * This sequence is just
used on hw_init only once, no need on 2913 * resume. 2914 */ 2915 ret = amdgpu_ucode_init_bo(adev); 2916 if (ret) 2917 goto failed; 2918 2919 ret = psp_load_fw(adev); 2920 if (ret) { 2921 dev_err(adev->dev, "PSP firmware loading failed\n"); 2922 goto failed; 2923 } 2924 2925 mutex_unlock(&adev->firmware.mutex); 2926 return 0; 2927 2928 failed: 2929 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2930 mutex_unlock(&adev->firmware.mutex); 2931 return -EINVAL; 2932 } 2933 2934 static int psp_hw_fini(void *handle) 2935 { 2936 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2937 struct psp_context *psp = &adev->psp; 2938 2939 if (psp->ta_fw) { 2940 psp_ras_terminate(psp); 2941 psp_securedisplay_terminate(psp); 2942 psp_rap_terminate(psp); 2943 psp_dtm_terminate(psp); 2944 psp_hdcp_terminate(psp); 2945 2946 if (adev->gmc.xgmi.num_physical_nodes > 1) 2947 psp_xgmi_terminate(psp); 2948 } 2949 2950 psp_asd_terminate(psp); 2951 psp_tmr_terminate(psp); 2952 2953 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2954 2955 return 0; 2956 } 2957 2958 static int psp_suspend(void *handle) 2959 { 2960 int ret = 0; 2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2962 struct psp_context *psp = &adev->psp; 2963 2964 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2965 psp->xgmi_context.context.initialized) { 2966 ret = psp_xgmi_terminate(psp); 2967 if (ret) { 2968 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 2969 goto out; 2970 } 2971 } 2972 2973 if (psp->ta_fw) { 2974 ret = psp_ras_terminate(psp); 2975 if (ret) { 2976 dev_err(adev->dev, "Failed to terminate ras ta\n"); 2977 goto out; 2978 } 2979 ret = psp_hdcp_terminate(psp); 2980 if (ret) { 2981 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 2982 goto out; 2983 } 2984 ret = psp_dtm_terminate(psp); 2985 if (ret) { 2986 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 2987 goto out; 2988 } 2989 ret = psp_rap_terminate(psp); 2990 if (ret) { 2991 dev_err(adev->dev, "Failed to terminate rap ta\n"); 2992 goto out; 2993 } 2994 ret = psp_securedisplay_terminate(psp); 2995 if (ret) { 2996 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 2997 goto out; 2998 } 2999 } 3000 3001 ret = psp_asd_terminate(psp); 3002 if (ret) { 3003 dev_err(adev->dev, "Failed to terminate asd\n"); 3004 goto out; 3005 } 3006 3007 ret = psp_tmr_terminate(psp); 3008 if (ret) { 3009 dev_err(adev->dev, "Failed to terminate tmr\n"); 3010 goto out; 3011 } 3012 3013 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3014 if (ret) 3015 dev_err(adev->dev, "PSP ring stop failed\n"); 3016 3017 out: 3018 return ret; 3019 } 3020 3021 static int psp_resume(void *handle) 3022 { 3023 int ret; 3024 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3025 struct psp_context *psp = &adev->psp; 3026 3027 dev_info(adev->dev, "PSP is resuming...\n"); 3028 3029 if (psp->mem_train_ctx.enable_mem_training) { 3030 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3031 if (ret) { 3032 dev_err(adev->dev, "Failed to process memory training!\n"); 3033 return ret; 3034 } 3035 } 3036 3037 mutex_lock(&adev->firmware.mutex); 3038 3039 ret = psp_hw_start(psp); 3040 if (ret) 3041 goto failed; 3042 3043 ret = psp_load_non_psp_fw(psp); 3044 if (ret) 3045 goto failed; 3046 3047 ret = psp_asd_initialize(psp); 3048 if (ret) { 3049 dev_err(adev->dev, "PSP load asd failed!\n"); 3050 goto failed; 3051 } 3052 3053 ret = psp_rl_load(adev); 3054 if (ret) { 3055 dev_err(adev->dev, "PSP load RL failed!\n"); 3056 goto failed; 3057 } 3058 3059 if (adev->gmc.xgmi.num_physical_nodes > 1) 
{ 3060 ret = psp_xgmi_initialize(psp, false, true); 3061 /* Warn about XGMI session initialization failure 3062 * instead of stopping driver initialization 3063 */ 3064 if (ret) 3065 dev_err(psp->adev->dev, 3066 "XGMI: Failed to initialize XGMI session\n"); 3067 } 3068 3069 if (psp->ta_fw) { 3070 ret = psp_ras_initialize(psp); 3071 if (ret) 3072 dev_err(psp->adev->dev, 3073 "RAS: Failed to initialize RAS\n"); 3074 3075 ret = psp_hdcp_initialize(psp); 3076 if (ret) 3077 dev_err(psp->adev->dev, 3078 "HDCP: Failed to initialize HDCP\n"); 3079 3080 ret = psp_dtm_initialize(psp); 3081 if (ret) 3082 dev_err(psp->adev->dev, 3083 "DTM: Failed to initialize DTM\n"); 3084 3085 ret = psp_rap_initialize(psp); 3086 if (ret) 3087 dev_err(psp->adev->dev, 3088 "RAP: Failed to initialize RAP\n"); 3089 3090 ret = psp_securedisplay_initialize(psp); 3091 if (ret) 3092 dev_err(psp->adev->dev, 3093 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3094 } 3095 3096 mutex_unlock(&adev->firmware.mutex); 3097 3098 return 0; 3099 3100 failed: 3101 dev_err(adev->dev, "PSP resume failed\n"); 3102 mutex_unlock(&adev->firmware.mutex); 3103 return ret; 3104 } 3105 3106 int psp_gpu_reset(struct amdgpu_device *adev) 3107 { 3108 int ret; 3109 3110 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3111 return 0; 3112 3113 mutex_lock(&adev->psp.mutex); 3114 ret = psp_mode1_reset(&adev->psp); 3115 mutex_unlock(&adev->psp.mutex); 3116 3117 return ret; 3118 } 3119 3120 int psp_rlc_autoload_start(struct psp_context *psp) 3121 { 3122 int ret; 3123 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3124 3125 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3126 3127 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3128 psp->fence_buf_mc_addr); 3129 3130 release_psp_cmd_buf(psp); 3131 3132 return ret; 3133 } 3134 3135 int psp_ring_cmd_submit(struct psp_context *psp, 3136 uint64_t cmd_buf_mc_addr, 3137 uint64_t fence_mc_addr, 3138 int index) 3139 { 3140 unsigned int psp_write_ptr_reg = 0; 3141 struct psp_gfx_rb_frame *write_frame; 3142 struct psp_ring *ring = &psp->km_ring; 3143 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3144 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3145 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3146 struct amdgpu_device *adev = psp->adev; 3147 uint32_t ring_size_dw = ring->ring_size / 4; 3148 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3149 3150 /* KM (GPCOM) prepare write pointer */ 3151 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3152 3153 /* Update KM RB frame pointer to new frame */ 3154 /* write_frame ptr increments by size of rb_frame in bytes */ 3155 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3156 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3157 write_frame = ring_buffer_start; 3158 else 3159 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3160 /* Check invalid write_frame ptr address */ 3161 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3162 dev_err(adev->dev, 3163 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3164 ring_buffer_start, ring_buffer_end, write_frame); 3165 dev_err(adev->dev, 3166 "write_frame is pointing to address out of bounds\n"); 3167 return -EINVAL; 3168 } 3169 3170 /* Initialize KM RB frame */ 3171 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3172 3173 /* Update KM RB frame */ 3174 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3175 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3176
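/* The fence address/value pair filled in next is how completion of this frame is detected: the caller passes its fence buffer plus an increasing index and then polls that buffer until the PSP writes the index back. */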
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3177 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3178 write_frame->fence_value = index; 3179 amdgpu_device_flush_hdp(adev, NULL); 3180 3181 /* Update the write Pointer in DWORDs */ 3182 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3183 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3184 return 0; 3185 } 3186 3187 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3188 { 3189 struct amdgpu_device *adev = psp->adev; 3190 char fw_name[PSP_FW_NAME_LEN]; 3191 const struct psp_firmware_header_v1_0 *asd_hdr; 3192 int err = 0; 3193 3194 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 3195 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); 3196 if (err) 3197 goto out; 3198 3199 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3200 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3201 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3202 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3203 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3204 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3205 return 0; 3206 out: 3207 amdgpu_ucode_release(&adev->psp.asd_fw); 3208 return err; 3209 } 3210 3211 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3212 { 3213 struct amdgpu_device *adev = psp->adev; 3214 char fw_name[PSP_FW_NAME_LEN]; 3215 const struct psp_firmware_header_v1_0 *toc_hdr; 3216 int err = 0; 3217 3218 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); 3219 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); 3220 if (err) 3221 goto out; 3222 3223 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3224 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3225 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3226 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3227 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3228 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3229 return 0; 3230 out: 3231 amdgpu_ucode_release(&adev->psp.toc_fw); 3232 return err; 3233 } 3234 3235 static int parse_sos_bin_descriptor(struct psp_context *psp, 3236 const struct psp_fw_bin_desc *desc, 3237 const struct psp_firmware_header_v2_0 *sos_hdr) 3238 { 3239 uint8_t *ucode_start_addr = NULL; 3240 3241 if (!psp || !desc || !sos_hdr) 3242 return -EINVAL; 3243 3244 ucode_start_addr = (uint8_t *)sos_hdr + 3245 le32_to_cpu(desc->offset_bytes) + 3246 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3247 3248 switch (desc->fw_type) { 3249 case PSP_FW_TYPE_PSP_SOS: 3250 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3251 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3252 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3253 psp->sos.start_addr = ucode_start_addr; 3254 break; 3255 case PSP_FW_TYPE_PSP_SYS_DRV: 3256 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3257 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3258 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3259 psp->sys.start_addr = ucode_start_addr; 3260 break; 3261 case PSP_FW_TYPE_PSP_KDB: 3262 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3263 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3264 psp->kdb.size_bytes = 
le32_to_cpu(desc->size_bytes); 3265 psp->kdb.start_addr = ucode_start_addr; 3266 break; 3267 case PSP_FW_TYPE_PSP_TOC: 3268 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3269 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3270 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3271 psp->toc.start_addr = ucode_start_addr; 3272 break; 3273 case PSP_FW_TYPE_PSP_SPL: 3274 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3275 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3276 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3277 psp->spl.start_addr = ucode_start_addr; 3278 break; 3279 case PSP_FW_TYPE_PSP_RL: 3280 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3281 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3282 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3283 psp->rl.start_addr = ucode_start_addr; 3284 break; 3285 case PSP_FW_TYPE_PSP_SOC_DRV: 3286 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3287 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3288 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3289 psp->soc_drv.start_addr = ucode_start_addr; 3290 break; 3291 case PSP_FW_TYPE_PSP_INTF_DRV: 3292 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3293 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3294 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3295 psp->intf_drv.start_addr = ucode_start_addr; 3296 break; 3297 case PSP_FW_TYPE_PSP_DBG_DRV: 3298 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3299 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3300 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3301 psp->dbg_drv.start_addr = ucode_start_addr; 3302 break; 3303 case PSP_FW_TYPE_PSP_RAS_DRV: 3304 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3305 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3306 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3307 psp->ras_drv.start_addr = ucode_start_addr; 3308 break; 3309 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3310 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3311 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3312 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3313 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3314 break; 3315 default: 3316 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3317 break; 3318 } 3319 3320 return 0; 3321 } 3322 3323 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3324 { 3325 const struct psp_firmware_header_v1_0 *sos_hdr; 3326 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3327 uint8_t *ucode_array_start_addr; 3328 3329 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3330 ucode_array_start_addr = (uint8_t *)sos_hdr + 3331 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3332 3333 if (adev->gmc.xgmi.connected_to_cpu || 3334 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3335 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3336 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3337 3338 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3339 adev->psp.sys.start_addr = ucode_array_start_addr; 3340 3341 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3342 adev->psp.sos.start_addr = ucode_array_start_addr + 3343 le32_to_cpu(sos_hdr->sos.offset_bytes); 3344 } else { 3345 /* Load alternate PSP SOS FW */ 3346 
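/* MP0 v13.0.2 dGPUs (not connected to a CPU) take the aux copies of sys_drv and sos carried in the v1_3 header instead of the primary images. */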
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3347 3348 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3349 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3350 3351 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3352 adev->psp.sys.start_addr = ucode_array_start_addr + 3353 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3354 3355 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3356 adev->psp.sos.start_addr = ucode_array_start_addr + 3357 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3358 } 3359 3360 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3361 dev_warn(adev->dev, "PSP SOS FW not available"); 3362 return -EINVAL; 3363 } 3364 3365 return 0; 3366 } 3367 3368 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3369 { 3370 struct amdgpu_device *adev = psp->adev; 3371 char fw_name[PSP_FW_NAME_LEN]; 3372 const struct psp_firmware_header_v1_0 *sos_hdr; 3373 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3374 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3375 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3376 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3377 int err = 0; 3378 uint8_t *ucode_array_start_addr; 3379 int fw_index = 0; 3380 3381 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 3382 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); 3383 if (err) 3384 goto out; 3385 3386 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3387 ucode_array_start_addr = (uint8_t *)sos_hdr + 3388 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3389 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3390 3391 switch (sos_hdr->header.header_version_major) { 3392 case 1: 3393 err = psp_init_sos_base_fw(adev); 3394 if (err) 3395 goto out; 3396 3397 if (sos_hdr->header.header_version_minor == 1) { 3398 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3399 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3400 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3401 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3402 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3403 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3404 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3405 } 3406 if (sos_hdr->header.header_version_minor == 2) { 3407 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3408 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3409 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3410 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3411 } 3412 if (sos_hdr->header.header_version_minor == 3) { 3413 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3414 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3415 adev->psp.toc.start_addr = ucode_array_start_addr + 3416 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3417 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3418 adev->psp.kdb.start_addr = ucode_array_start_addr + 3419 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3420 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3421 adev->psp.spl.start_addr = ucode_array_start_addr + 3422 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3423 
adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3424 adev->psp.rl.start_addr = ucode_array_start_addr + 3425 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3426 } 3427 break; 3428 case 2: 3429 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3430 3431 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3432 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3433 err = -EINVAL; 3434 goto out; 3435 } 3436 3437 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { 3438 err = parse_sos_bin_descriptor(psp, 3439 &sos_hdr_v2_0->psp_fw_bin[fw_index], 3440 sos_hdr_v2_0); 3441 if (err) 3442 goto out; 3443 } 3444 break; 3445 default: 3446 dev_err(adev->dev, 3447 "unsupported psp sos firmware\n"); 3448 err = -EINVAL; 3449 goto out; 3450 } 3451 3452 return 0; 3453 out: 3454 amdgpu_ucode_release(&adev->psp.sos_fw); 3455 3456 return err; 3457 } 3458 3459 static int parse_ta_bin_descriptor(struct psp_context *psp, 3460 const struct psp_fw_bin_desc *desc, 3461 const struct ta_firmware_header_v2_0 *ta_hdr) 3462 { 3463 uint8_t *ucode_start_addr = NULL; 3464 3465 if (!psp || !desc || !ta_hdr) 3466 return -EINVAL; 3467 3468 ucode_start_addr = (uint8_t *)ta_hdr + 3469 le32_to_cpu(desc->offset_bytes) + 3470 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3471 3472 switch (desc->fw_type) { 3473 case TA_FW_TYPE_PSP_ASD: 3474 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3475 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3476 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3477 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3478 break; 3479 case TA_FW_TYPE_PSP_XGMI: 3480 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3481 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3482 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3483 break; 3484 case TA_FW_TYPE_PSP_RAS: 3485 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3486 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3487 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3488 break; 3489 case TA_FW_TYPE_PSP_HDCP: 3490 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3491 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3492 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3493 break; 3494 case TA_FW_TYPE_PSP_DTM: 3495 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3496 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3497 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3498 break; 3499 case TA_FW_TYPE_PSP_RAP: 3500 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3501 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3502 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3503 break; 3504 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3505 psp->securedisplay_context.context.bin_desc.fw_version = 3506 le32_to_cpu(desc->fw_version); 3507 psp->securedisplay_context.context.bin_desc.size_bytes = 3508 le32_to_cpu(desc->size_bytes); 3509 psp->securedisplay_context.context.bin_desc.start_addr = 3510 ucode_start_addr; 3511 break; 3512 default: 3513 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", 
desc->fw_type); 3514 break; 3515 } 3516 3517 return 0; 3518 } 3519 3520 static int parse_ta_v1_microcode(struct psp_context *psp) 3521 { 3522 const struct ta_firmware_header_v1_0 *ta_hdr; 3523 struct amdgpu_device *adev = psp->adev; 3524 3525 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3526 3527 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3528 return -EINVAL; 3529 3530 adev->psp.xgmi_context.context.bin_desc.fw_version = 3531 le32_to_cpu(ta_hdr->xgmi.fw_version); 3532 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3533 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3534 adev->psp.xgmi_context.context.bin_desc.start_addr = 3535 (uint8_t *)ta_hdr + 3536 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3537 3538 adev->psp.ras_context.context.bin_desc.fw_version = 3539 le32_to_cpu(ta_hdr->ras.fw_version); 3540 adev->psp.ras_context.context.bin_desc.size_bytes = 3541 le32_to_cpu(ta_hdr->ras.size_bytes); 3542 adev->psp.ras_context.context.bin_desc.start_addr = 3543 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3544 le32_to_cpu(ta_hdr->ras.offset_bytes); 3545 3546 adev->psp.hdcp_context.context.bin_desc.fw_version = 3547 le32_to_cpu(ta_hdr->hdcp.fw_version); 3548 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3549 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3550 adev->psp.hdcp_context.context.bin_desc.start_addr = 3551 (uint8_t *)ta_hdr + 3552 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3553 3554 adev->psp.dtm_context.context.bin_desc.fw_version = 3555 le32_to_cpu(ta_hdr->dtm.fw_version); 3556 adev->psp.dtm_context.context.bin_desc.size_bytes = 3557 le32_to_cpu(ta_hdr->dtm.size_bytes); 3558 adev->psp.dtm_context.context.bin_desc.start_addr = 3559 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3560 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3561 3562 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3563 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3564 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3565 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3566 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3567 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3568 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3569 3570 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3571 3572 return 0; 3573 } 3574 3575 static int parse_ta_v2_microcode(struct psp_context *psp) 3576 { 3577 const struct ta_firmware_header_v2_0 *ta_hdr; 3578 struct amdgpu_device *adev = psp->adev; 3579 int err = 0; 3580 int ta_index = 0; 3581 3582 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3583 3584 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3585 return -EINVAL; 3586 3587 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3588 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3589 return -EINVAL; 3590 } 3591 3592 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3593 err = parse_ta_bin_descriptor(psp, 3594 &ta_hdr->ta_fw_bin[ta_index], 3595 ta_hdr); 3596 if (err) 3597 return err; 3598 } 3599 3600 return 0; 3601 } 3602 3603 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3604 { 3605 const struct common_firmware_header *hdr; 3606 struct amdgpu_device *adev = psp->adev; 3607 char fw_name[PSP_FW_NAME_LEN]; 3608 int err; 3609 3610 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 3611 err = amdgpu_ucode_request(adev, 
&adev->psp.ta_fw, fw_name); 3612 if (err) 3613 return err; 3614 3615 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3616 switch (le16_to_cpu(hdr->header_version_major)) { 3617 case 1: 3618 err = parse_ta_v1_microcode(psp); 3619 break; 3620 case 2: 3621 err = parse_ta_v2_microcode(psp); 3622 break; 3623 default: 3624 dev_err(adev->dev, "unsupported TA header version\n"); 3625 err = -EINVAL; 3626 } 3627 3628 if (err) 3629 amdgpu_ucode_release(&adev->psp.ta_fw); 3630 3631 return err; 3632 } 3633 3634 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3635 { 3636 struct amdgpu_device *adev = psp->adev; 3637 char fw_name[PSP_FW_NAME_LEN]; 3638 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3639 struct amdgpu_firmware_info *info = NULL; 3640 int err = 0; 3641 3642 if (!amdgpu_sriov_vf(adev)) { 3643 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3644 return -EINVAL; 3645 } 3646 3647 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); 3648 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name); 3649 if (err) { 3650 if (err == -ENODEV) { 3651 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3652 err = 0; 3653 goto out; 3654 } 3655 dev_err(adev->dev, "failed to initialize cap microcode\n"); goto out; 3656 } 3657 3658 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3659 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3660 info->fw = adev->psp.cap_fw; 3661 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3662 adev->psp.cap_fw->data; 3663 adev->firmware.fw_size += ALIGN( 3664 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3665 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3666 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3667 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3668 3669 return 0; 3670 3671 out: 3672 amdgpu_ucode_release(&adev->psp.cap_fw); 3673 return err; 3674 } 3675 3676 static int psp_set_clockgating_state(void *handle, 3677 enum amd_clockgating_state state) 3678 { 3679 return 0; 3680 } 3681 3682 static int psp_set_powergating_state(void *handle, 3683 enum amd_powergating_state state) 3684 { 3685 return 0; 3686 } 3687 3688 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3689 struct device_attribute *attr, 3690 char *buf) 3691 { 3692 struct drm_device *ddev = dev_get_drvdata(dev); 3693 struct amdgpu_device *adev = drm_to_adev(ddev); 3694 uint32_t fw_ver; 3695 int ret; 3696 3697 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3698 dev_info(adev->dev, "PSP block is not ready yet.\n"); 3699 return -EBUSY; 3700 } 3701 3702 mutex_lock(&adev->psp.mutex); 3703 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3704 mutex_unlock(&adev->psp.mutex); 3705 3706 if (ret) { 3707 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); 3708 return ret; 3709 } 3710 3711 return sysfs_emit(buf, "%x\n", fw_ver); 3712 } 3713 3714 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3715 struct device_attribute *attr, 3716 const char *buf, 3717 size_t count) 3718 { 3719 struct drm_device *ddev = dev_get_drvdata(dev); 3720 struct amdgpu_device *adev = drm_to_adev(ddev); 3721 int ret, idx; 3722 char fw_name[100]; 3723 const struct firmware *usbc_pd_fw; 3724 struct amdgpu_bo *fw_buf_bo = NULL; 3725 uint64_t fw_pri_mc_addr; 3726 void *fw_pri_cpu_addr; 3727 3728 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3729 dev_err(adev->dev,
"PSP block is not ready yet."); 3730 return -EBUSY; 3731 } 3732 3733 if (!drm_dev_enter(ddev, &idx)) 3734 return -ENODEV; 3735 3736 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 3737 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 3738 if (ret) 3739 goto fail; 3740 3741 /* LFB address which is aligned to 1MB boundary per PSP request */ 3742 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3743 AMDGPU_GEM_DOMAIN_VRAM | 3744 AMDGPU_GEM_DOMAIN_GTT, 3745 &fw_buf_bo, &fw_pri_mc_addr, 3746 &fw_pri_cpu_addr); 3747 if (ret) 3748 goto rel_buf; 3749 3750 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3751 3752 mutex_lock(&adev->psp.mutex); 3753 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3754 mutex_unlock(&adev->psp.mutex); 3755 3756 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3757 3758 rel_buf: 3759 release_firmware(usbc_pd_fw); 3760 fail: 3761 if (ret) { 3762 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); 3763 count = ret; 3764 } 3765 3766 drm_dev_exit(idx); 3767 return count; 3768 } 3769 3770 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3771 { 3772 int idx; 3773 3774 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3775 return; 3776 3777 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3778 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3779 3780 drm_dev_exit(idx); 3781 } 3782 3783 /** 3784 * DOC: usbc_pd_fw 3785 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 3786 * this file will trigger the update process. 3787 */ 3788 static DEVICE_ATTR(usbc_pd_fw, 0644, 3789 psp_usbc_pd_fw_sysfs_read, 3790 psp_usbc_pd_fw_sysfs_write); 3791 3792 int is_psp_fw_valid(struct psp_bin_desc bin) 3793 { 3794 return bin.size_bytes; 3795 } 3796 3797 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3798 struct bin_attribute *bin_attr, 3799 char *buffer, loff_t pos, size_t count) 3800 { 3801 struct device *dev = kobj_to_dev(kobj); 3802 struct drm_device *ddev = dev_get_drvdata(dev); 3803 struct amdgpu_device *adev = drm_to_adev(ddev); 3804 3805 adev->psp.vbflash_done = false; 3806 3807 /* Safeguard against memory drain */ 3808 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3809 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 3810 kvfree(adev->psp.vbflash_tmp_buf); 3811 adev->psp.vbflash_tmp_buf = NULL; 3812 adev->psp.vbflash_image_size = 0; 3813 return -ENOMEM; 3814 } 3815 3816 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3817 if (!adev->psp.vbflash_tmp_buf) { 3818 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3819 if (!adev->psp.vbflash_tmp_buf) 3820 return -ENOMEM; 3821 } 3822 3823 mutex_lock(&adev->psp.mutex); 3824 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 3825 adev->psp.vbflash_image_size += count; 3826 mutex_unlock(&adev->psp.mutex); 3827 3828 dev_dbg(adev->dev, "IFWI staged for update\n"); 3829 3830 return count; 3831 } 3832 3833 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 3834 struct bin_attribute *bin_attr, char *buffer, 3835 loff_t pos, size_t count) 3836 { 3837 struct device *dev = kobj_to_dev(kobj); 3838 struct drm_device *ddev = dev_get_drvdata(dev); 3839 struct amdgpu_device *adev = drm_to_adev(ddev); 3840 struct amdgpu_bo *fw_buf_bo = NULL; 3841 uint64_t fw_pri_mc_addr; 3842 void *fw_pri_cpu_addr; 3843 int ret; 3844 3845 if (adev->psp.vbflash_image_size 

int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
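
/*
 * Illustrative flash sequence, matching the DOC blocks above (sketch only;
 * the sysfs location follows the GPU's PCI address and may differ per system):
 *
 *   # stage the IFWI image, then trigger the actual SPIROM update by reading
 *   cat ifwi.bin > /sys/bus/pci/devices/<domain:bus:dev.fn>/psp_vbflash
 *   cat /sys/bus/pci/devices/<domain:bus:dev.fn>/psp_vbflash > /dev/null
 *
 *   # poll for completion: reports 0x1 once the IFWI flash has finished
 *   cat /sys/bus/pci/devices/<domain:bus:dev.fn>/psp_vbflash_status
 */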

static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 14,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};
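
/*
 * For illustration only: the amdgpu_ip_block_version tables above are not
 * registered in this file; the SoC-specific init code selects the PSP variant
 * matching the detected ASIC and appends it to the device's IP block list.
 * A minimal sketch of that hookup, using a hypothetical helper that is not
 * compiled here, would look like the following.
 */
#if 0	/* sketch only; the real selection logic lives in the SoC init files */
static int example_register_psp_block(struct amdgpu_device *adev)
{
	/* Adding the block makes the amdgpu core invoke the psp_ip_funcs
	 * callbacks (early_init, sw_init, hw_init, ...) at the matching
	 * stages of device bring-up. */
	return amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
}
#endif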