1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <drm/drm_drv.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_xgmi.h" 33 #include "soc15_common.h" 34 #include "psp_v3_1.h" 35 #include "psp_v10_0.h" 36 #include "psp_v11_0.h" 37 #include "psp_v11_0_8.h" 38 #include "psp_v12_0.h" 39 #include "psp_v13_0.h" 40 #include "psp_v13_0_4.h" 41 42 #include "amdgpu_ras.h" 43 #include "amdgpu_securedisplay.h" 44 #include "amdgpu_atomfirmware.h" 45 46 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) 47 48 static int psp_load_smu_fw(struct psp_context *psp); 49 static int psp_rap_terminate(struct psp_context *psp); 50 static int psp_securedisplay_terminate(struct psp_context *psp); 51 52 static int psp_ring_init(struct psp_context *psp, 53 enum psp_ring_type ring_type) 54 { 55 int ret = 0; 56 struct psp_ring *ring; 57 struct amdgpu_device *adev = psp->adev; 58 59 ring = &psp->km_ring; 60 61 ring->ring_type = ring_type; 62 63 /* allocate 4k Page of Local Frame Buffer memory for ring */ 64 ring->ring_size = 0x1000; 65 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 66 AMDGPU_GEM_DOMAIN_VRAM | 67 AMDGPU_GEM_DOMAIN_GTT, 68 &adev->firmware.rbuf, 69 &ring->ring_mem_mc_addr, 70 (void **)&ring->ring_mem); 71 if (ret) { 72 ring->ring_size = 0; 73 return ret; 74 } 75 76 return 0; 77 } 78 79 /* 80 * Due to DF Cstate management centralized to PMFW, the firmware 81 * loading sequence will be updated as below: 82 * - Load KDB 83 * - Load SYS_DRV 84 * - Load tOS 85 * - Load PMFW 86 * - Setup TMR 87 * - Load other non-psp fw 88 * - Load ASD 89 * - Load XGMI/RAS/HDCP/DTM TA if any 90 * 91 * This new sequence is required for 92 * - Arcturus and onwards 93 */ 94 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 95 { 96 struct amdgpu_device *adev = psp->adev; 97 98 if (amdgpu_sriov_vf(adev)) { 99 psp->pmfw_centralized_cstate_management = false; 100 return; 101 } 102 103 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 104 case IP_VERSION(11, 0, 0): 105 case IP_VERSION(11, 0, 4): 106 case IP_VERSION(11, 0, 5): 107 case IP_VERSION(11, 0, 7): 108 case IP_VERSION(11, 0, 9): 109 case IP_VERSION(11, 0, 11): 110 case IP_VERSION(11, 0, 12): 111 case IP_VERSION(11, 0, 13): 112 case IP_VERSION(13, 0, 0): 113 case IP_VERSION(13, 0, 2): 114 case IP_VERSION(13, 
0, 7): 115 psp->pmfw_centralized_cstate_management = true; 116 break; 117 default: 118 psp->pmfw_centralized_cstate_management = false; 119 break; 120 } 121 } 122 123 static int psp_init_sriov_microcode(struct psp_context *psp) 124 { 125 struct amdgpu_device *adev = psp->adev; 126 char ucode_prefix[30]; 127 int ret = 0; 128 129 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 130 131 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 132 case IP_VERSION(9, 0, 0): 133 case IP_VERSION(11, 0, 7): 134 case IP_VERSION(11, 0, 9): 135 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 136 ret = psp_init_cap_microcode(psp, ucode_prefix); 137 break; 138 case IP_VERSION(13, 0, 2): 139 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 140 ret = psp_init_cap_microcode(psp, ucode_prefix); 141 ret &= psp_init_ta_microcode(psp, ucode_prefix); 142 break; 143 case IP_VERSION(13, 0, 0): 144 adev->virt.autoload_ucode_id = 0; 145 break; 146 case IP_VERSION(13, 0, 6): 147 ret = psp_init_cap_microcode(psp, ucode_prefix); 148 ret &= psp_init_ta_microcode(psp, ucode_prefix); 149 break; 150 case IP_VERSION(13, 0, 10): 151 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; 152 ret = psp_init_cap_microcode(psp, ucode_prefix); 153 break; 154 default: 155 return -EINVAL; 156 } 157 return ret; 158 } 159 160 static int psp_early_init(void *handle) 161 { 162 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 163 struct psp_context *psp = &adev->psp; 164 165 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 166 case IP_VERSION(9, 0, 0): 167 psp_v3_1_set_psp_funcs(psp); 168 psp->autoload_supported = false; 169 break; 170 case IP_VERSION(10, 0, 0): 171 case IP_VERSION(10, 0, 1): 172 psp_v10_0_set_psp_funcs(psp); 173 psp->autoload_supported = false; 174 break; 175 case IP_VERSION(11, 0, 2): 176 case IP_VERSION(11, 0, 4): 177 psp_v11_0_set_psp_funcs(psp); 178 psp->autoload_supported = false; 179 break; 180 case IP_VERSION(11, 0, 0): 181 case IP_VERSION(11, 0, 7): 182 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); 183 fallthrough; 184 case IP_VERSION(11, 0, 5): 185 case IP_VERSION(11, 0, 9): 186 case IP_VERSION(11, 0, 11): 187 case IP_VERSION(11, 5, 0): 188 case IP_VERSION(11, 0, 12): 189 case IP_VERSION(11, 0, 13): 190 psp_v11_0_set_psp_funcs(psp); 191 psp->autoload_supported = true; 192 break; 193 case IP_VERSION(11, 0, 3): 194 case IP_VERSION(12, 0, 1): 195 psp_v12_0_set_psp_funcs(psp); 196 break; 197 case IP_VERSION(13, 0, 2): 198 case IP_VERSION(13, 0, 6): 199 psp_v13_0_set_psp_funcs(psp); 200 break; 201 case IP_VERSION(13, 0, 1): 202 case IP_VERSION(13, 0, 3): 203 case IP_VERSION(13, 0, 5): 204 case IP_VERSION(13, 0, 8): 205 case IP_VERSION(13, 0, 11): 206 case IP_VERSION(14, 0, 0): 207 psp_v13_0_set_psp_funcs(psp); 208 psp->autoload_supported = true; 209 break; 210 case IP_VERSION(11, 0, 8): 211 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 212 psp_v11_0_8_set_psp_funcs(psp); 213 psp->autoload_supported = false; 214 } 215 break; 216 case IP_VERSION(13, 0, 0): 217 case IP_VERSION(13, 0, 7): 218 case IP_VERSION(13, 0, 10): 219 psp_v13_0_set_psp_funcs(psp); 220 psp->autoload_supported = true; 221 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 222 break; 223 case IP_VERSION(13, 0, 4): 224 psp_v13_0_4_set_psp_funcs(psp); 225 psp->autoload_supported = true; 226 break; 227 default: 228 return -EINVAL; 229 } 230 231 psp->adev = adev; 232 233 psp_check_pmfw_centralized_cstate_management(psp); 234 235 if (amdgpu_sriov_vf(adev)) 236 return 
psp_init_sriov_microcode(psp); 237 else 238 return psp_init_microcode(psp); 239 } 240 241 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) 242 { 243 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, 244 &mem_ctx->shared_buf); 245 mem_ctx->shared_bo = NULL; 246 } 247 248 static void psp_free_shared_bufs(struct psp_context *psp) 249 { 250 void *tmr_buf; 251 void **pptr; 252 253 /* free TMR memory buffer */ 254 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 255 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 256 psp->tmr_bo = NULL; 257 258 /* free xgmi shared memory */ 259 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); 260 261 /* free ras shared memory */ 262 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); 263 264 /* free hdcp shared memory */ 265 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); 266 267 /* free dtm shared memory */ 268 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); 269 270 /* free rap shared memory */ 271 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 272 273 /* free securedisplay shared memory */ 274 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 275 276 277 } 278 279 static void psp_memory_training_fini(struct psp_context *psp) 280 { 281 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 282 283 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 284 kfree(ctx->sys_cache); 285 ctx->sys_cache = NULL; 286 } 287 288 static int psp_memory_training_init(struct psp_context *psp) 289 { 290 int ret; 291 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 292 293 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 294 dev_dbg(psp->adev->dev, "memory training is not supported!\n"); 295 return 0; 296 } 297 298 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 299 if (ctx->sys_cache == NULL) { 300 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); 301 ret = -ENOMEM; 302 goto Err_out; 303 } 304 305 dev_dbg(psp->adev->dev, 306 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 307 ctx->train_data_size, 308 ctx->p2c_train_data_offset, 309 ctx->c2p_train_data_offset); 310 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 311 return 0; 312 313 Err_out: 314 psp_memory_training_fini(psp); 315 return ret; 316 } 317 318 /* 319 * Helper funciton to query psp runtime database entry 320 * 321 * @adev: amdgpu_device pointer 322 * @entry_type: the type of psp runtime database entry 323 * @db_entry: runtime database entry pointer 324 * 325 * Return false if runtime database doesn't exit or entry is invalid 326 * or true if the specific database entry is found, and copy to @db_entry 327 */ 328 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 329 enum psp_runtime_entry_type entry_type, 330 void *db_entry) 331 { 332 uint64_t db_header_pos, db_dir_pos; 333 struct psp_runtime_data_header db_header = {0}; 334 struct psp_runtime_data_directory db_dir = {0}; 335 bool ret = false; 336 int i; 337 338 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) 339 return false; 340 341 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 342 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 343 344 /* read runtime db header from vram */ 345 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 346 sizeof(struct psp_runtime_data_header), false); 347 348 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 349 /* runtime db doesn't exist, 
exit */ 350 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); 351 return false; 352 } 353 354 /* read runtime database entry from vram */ 355 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 356 sizeof(struct psp_runtime_data_directory), false); 357 358 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 359 /* invalid db entry count, exit */ 360 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 361 return false; 362 } 363 364 /* look up for requested entry type */ 365 for (i = 0; i < db_dir.entry_count && !ret; i++) { 366 if (db_dir.entry_list[i].entry_type == entry_type) { 367 switch (entry_type) { 368 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 369 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { 370 /* invalid db entry size */ 371 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n"); 372 return false; 373 } 374 /* read runtime database entry */ 375 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 376 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 377 ret = true; 378 break; 379 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS: 380 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) { 381 /* invalid db entry size */ 382 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n"); 383 return false; 384 } 385 /* read runtime database entry */ 386 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 387 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false); 388 ret = true; 389 break; 390 default: 391 ret = false; 392 break; 393 } 394 } 395 } 396 397 return ret; 398 } 399 400 static int psp_sw_init(void *handle) 401 { 402 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 403 struct psp_context *psp = &adev->psp; 404 int ret; 405 struct psp_runtime_boot_cfg_entry boot_cfg_entry; 406 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 407 struct psp_runtime_scpm_entry scpm_entry; 408 409 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 410 if (!psp->cmd) { 411 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); 412 ret = -ENOMEM; 413 } 414 415 adev->psp.xgmi_context.supports_extended_data = 416 !adev->gmc.xgmi.connected_to_cpu && 417 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); 418 419 memset(&scpm_entry, 0, sizeof(scpm_entry)); 420 if ((psp_get_runtime_db_entry(adev, 421 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, 422 &scpm_entry)) && 423 (scpm_entry.scpm_status != SCPM_DISABLE)) { 424 adev->scpm_enabled = true; 425 adev->scpm_status = scpm_entry.scpm_status; 426 } else { 427 adev->scpm_enabled = false; 428 adev->scpm_status = SCPM_DISABLE; 429 } 430 431 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */ 432 433 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 434 if (psp_get_runtime_db_entry(adev, 435 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 436 &boot_cfg_entry)) { 437 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 438 if ((psp->boot_cfg_bitmask) & 439 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 440 /* If psp runtime database exists, then 441 * only enable two stage memory training 442 * when TWO_STAGE_DRAM_TRAINING bit is set 443 * in runtime database 444 */ 445 mem_training_ctx->enable_mem_training = true; 446 } 447 448 } else { 449 /* If psp runtime database doesn't exist or is 450 * invalid, force enable two stage memory training 451 */ 452 
mem_training_ctx->enable_mem_training = true; 453 } 454 455 if (mem_training_ctx->enable_mem_training) { 456 ret = psp_memory_training_init(psp); 457 if (ret) { 458 dev_err(adev->dev, "Failed to initialize memory training!\n"); 459 return ret; 460 } 461 462 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 463 if (ret) { 464 dev_err(adev->dev, "Failed to process memory training!\n"); 465 return ret; 466 } 467 } 468 469 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 470 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 471 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 472 &psp->fw_pri_bo, 473 &psp->fw_pri_mc_addr, 474 &psp->fw_pri_buf); 475 if (ret) 476 return ret; 477 478 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 479 AMDGPU_GEM_DOMAIN_VRAM | 480 AMDGPU_GEM_DOMAIN_GTT, 481 &psp->fence_buf_bo, 482 &psp->fence_buf_mc_addr, 483 &psp->fence_buf); 484 if (ret) 485 goto failed1; 486 487 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 488 AMDGPU_GEM_DOMAIN_VRAM | 489 AMDGPU_GEM_DOMAIN_GTT, 490 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 491 (void **)&psp->cmd_buf_mem); 492 if (ret) 493 goto failed2; 494 495 return 0; 496 497 failed2: 498 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 499 &psp->fence_buf_mc_addr, &psp->fence_buf); 500 failed1: 501 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 502 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 503 return ret; 504 } 505 506 static int psp_sw_fini(void *handle) 507 { 508 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 509 struct psp_context *psp = &adev->psp; 510 struct psp_gfx_cmd_resp *cmd = psp->cmd; 511 512 psp_memory_training_fini(psp); 513 514 amdgpu_ucode_release(&psp->sos_fw); 515 amdgpu_ucode_release(&psp->asd_fw); 516 amdgpu_ucode_release(&psp->ta_fw); 517 amdgpu_ucode_release(&psp->cap_fw); 518 amdgpu_ucode_release(&psp->toc_fw); 519 520 kfree(cmd); 521 cmd = NULL; 522 523 psp_free_shared_bufs(psp); 524 525 if (psp->km_ring.ring_mem) 526 amdgpu_bo_free_kernel(&adev->firmware.rbuf, 527 &psp->km_ring.ring_mem_mc_addr, 528 (void **)&psp->km_ring.ring_mem); 529 530 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 531 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 532 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 533 &psp->fence_buf_mc_addr, &psp->fence_buf); 534 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 535 (void **)&psp->cmd_buf_mem); 536 537 return 0; 538 } 539 540 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 541 uint32_t reg_val, uint32_t mask, bool check_changed) 542 { 543 uint32_t val; 544 int i; 545 struct amdgpu_device *adev = psp->adev; 546 547 if (psp->adev->no_hw_access) 548 return 0; 549 550 for (i = 0; i < adev->usec_timeout; i++) { 551 val = RREG32(reg_index); 552 if (check_changed) { 553 if (val != reg_val) 554 return 0; 555 } else { 556 if ((val & mask) == reg_val) 557 return 0; 558 } 559 udelay(1); 560 } 561 562 return -ETIME; 563 } 564 565 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, 566 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout) 567 { 568 uint32_t val; 569 int i; 570 struct amdgpu_device *adev = psp->adev; 571 572 if (psp->adev->no_hw_access) 573 return 0; 574 575 for (i = 0; i < msec_timeout; i++) { 576 val = RREG32(reg_index); 577 if ((val & mask) == reg_val) 578 return 0; 579 msleep(1); 580 } 581 582 return -ETIME; 583 } 584 585 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) 586 { 587 switch (cmd_id) { 588 case GFX_CMD_ID_LOAD_TA: 589 return "LOAD_TA"; 590 case GFX_CMD_ID_UNLOAD_TA: 
591 return "UNLOAD_TA"; 592 case GFX_CMD_ID_INVOKE_CMD: 593 return "INVOKE_CMD"; 594 case GFX_CMD_ID_LOAD_ASD: 595 return "LOAD_ASD"; 596 case GFX_CMD_ID_SETUP_TMR: 597 return "SETUP_TMR"; 598 case GFX_CMD_ID_LOAD_IP_FW: 599 return "LOAD_IP_FW"; 600 case GFX_CMD_ID_DESTROY_TMR: 601 return "DESTROY_TMR"; 602 case GFX_CMD_ID_SAVE_RESTORE: 603 return "SAVE_RESTORE_IP_FW"; 604 case GFX_CMD_ID_SETUP_VMR: 605 return "SETUP_VMR"; 606 case GFX_CMD_ID_DESTROY_VMR: 607 return "DESTROY_VMR"; 608 case GFX_CMD_ID_PROG_REG: 609 return "PROG_REG"; 610 case GFX_CMD_ID_GET_FW_ATTESTATION: 611 return "GET_FW_ATTESTATION"; 612 case GFX_CMD_ID_LOAD_TOC: 613 return "ID_LOAD_TOC"; 614 case GFX_CMD_ID_AUTOLOAD_RLC: 615 return "AUTOLOAD_RLC"; 616 case GFX_CMD_ID_BOOT_CFG: 617 return "BOOT_CFG"; 618 default: 619 return "UNKNOWN CMD"; 620 } 621 } 622 623 static int 624 psp_cmd_submit_buf(struct psp_context *psp, 625 struct amdgpu_firmware_info *ucode, 626 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 627 { 628 int ret; 629 int index; 630 int timeout = 20000; 631 bool ras_intr = false; 632 bool skip_unsupport = false; 633 634 if (psp->adev->no_hw_access) 635 return 0; 636 637 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 638 639 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 640 641 index = atomic_inc_return(&psp->fence_value); 642 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 643 if (ret) { 644 atomic_dec(&psp->fence_value); 645 goto exit; 646 } 647 648 amdgpu_device_invalidate_hdp(psp->adev, NULL); 649 while (*((unsigned int *)psp->fence_buf) != index) { 650 if (--timeout == 0) 651 break; 652 /* 653 * Shouldn't wait for timeout when err_event_athub occurs, 654 * because gpu reset thread triggered and lock resource should 655 * be released for psp resume sequence. 656 */ 657 ras_intr = amdgpu_ras_intr_triggered(); 658 if (ras_intr) 659 break; 660 usleep_range(10, 100); 661 amdgpu_device_invalidate_hdp(psp->adev, NULL); 662 } 663 664 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 665 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 666 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 667 668 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 669 670 /* In some cases, psp response status is not 0 even there is no 671 * problem while the command is submitted. Some version of PSP FW 672 * doesn't write 0 to that field. 673 * So here we would like to only print a warning instead of an error 674 * during psp initialization to avoid breaking hw_init and it doesn't 675 * return -EINVAL. 676 */ 677 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 678 if (ucode) 679 dev_warn(psp->adev->dev, 680 "failed to load ucode %s(0x%X) ", 681 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 682 dev_warn(psp->adev->dev, 683 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 684 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, 685 psp->cmd_buf_mem->resp.status); 686 /* If any firmware (including CAP) load fails under SRIOV, it should 687 * return failure to stop the VF from initializing. 
688 * Also return failure in case of timeout 689 */ 690 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) { 691 ret = -EINVAL; 692 goto exit; 693 } 694 } 695 696 if (ucode) { 697 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 698 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 699 } 700 701 exit: 702 return ret; 703 } 704 705 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) 706 { 707 struct psp_gfx_cmd_resp *cmd = psp->cmd; 708 709 mutex_lock(&psp->mutex); 710 711 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 712 713 return cmd; 714 } 715 716 static void release_psp_cmd_buf(struct psp_context *psp) 717 { 718 mutex_unlock(&psp->mutex); 719 } 720 721 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 722 struct psp_gfx_cmd_resp *cmd, 723 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 724 { 725 struct amdgpu_device *adev = psp->adev; 726 uint32_t size = 0; 727 uint64_t tmr_pa = 0; 728 729 if (tmr_bo) { 730 size = amdgpu_bo_size(tmr_bo); 731 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 732 } 733 734 if (amdgpu_sriov_vf(psp->adev)) 735 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 736 else 737 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 738 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 739 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 740 cmd->cmd.cmd_setup_tmr.buf_size = size; 741 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 742 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 743 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 744 } 745 746 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 747 uint64_t pri_buf_mc, uint32_t size) 748 { 749 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 750 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 751 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 752 cmd->cmd.cmd_load_toc.toc_size = size; 753 } 754 755 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 756 static int psp_load_toc(struct psp_context *psp, 757 uint32_t *tmr_size) 758 { 759 int ret; 760 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 761 762 /* Copy toc to psp firmware private buffer */ 763 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); 764 765 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); 766 767 ret = psp_cmd_submit_buf(psp, NULL, cmd, 768 psp->fence_buf_mc_addr); 769 if (!ret) 770 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 771 772 release_psp_cmd_buf(psp); 773 774 return ret; 775 } 776 777 static bool psp_boottime_tmr(struct psp_context *psp) 778 { 779 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 780 case IP_VERSION(13, 0, 6): 781 return true; 782 default: 783 return false; 784 } 785 } 786 787 /* Set up Trusted Memory Region */ 788 static int psp_tmr_init(struct psp_context *psp) 789 { 790 int ret = 0; 791 int tmr_size; 792 void *tmr_buf; 793 void **pptr; 794 795 /* 796 * According to HW engineer, they prefer the TMR address be "naturally 797 * aligned" , e.g. the start address be an integer divide of TMR size. 798 * 799 * Note: this memory need be reserved till the driver 800 * uninitializes. 
801 */ 802 tmr_size = PSP_TMR_SIZE(psp->adev); 803 804 /* For ASICs support RLC autoload, psp will parse the toc 805 * and calculate the total size of TMR needed 806 */ 807 if (!amdgpu_sriov_vf(psp->adev) && 808 psp->toc.start_addr && 809 psp->toc.size_bytes && 810 psp->fw_pri_buf) { 811 ret = psp_load_toc(psp, &tmr_size); 812 if (ret) { 813 dev_err(psp->adev->dev, "Failed to load toc\n"); 814 return ret; 815 } 816 } 817 818 if (!psp->tmr_bo) { 819 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 820 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 821 PSP_TMR_ALIGNMENT, 822 AMDGPU_HAS_VRAM(psp->adev) ? 823 AMDGPU_GEM_DOMAIN_VRAM : 824 AMDGPU_GEM_DOMAIN_GTT, 825 &psp->tmr_bo, &psp->tmr_mc_addr, 826 pptr); 827 } 828 829 return ret; 830 } 831 832 static bool psp_skip_tmr(struct psp_context *psp) 833 { 834 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 835 case IP_VERSION(11, 0, 9): 836 case IP_VERSION(11, 0, 7): 837 case IP_VERSION(13, 0, 2): 838 case IP_VERSION(13, 0, 6): 839 case IP_VERSION(13, 0, 10): 840 return true; 841 default: 842 return false; 843 } 844 } 845 846 static int psp_tmr_load(struct psp_context *psp) 847 { 848 int ret; 849 struct psp_gfx_cmd_resp *cmd; 850 851 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 852 * Already set up by host driver. 853 */ 854 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 855 return 0; 856 857 cmd = acquire_psp_cmd_buf(psp); 858 859 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 860 if (psp->tmr_bo) 861 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", 862 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 863 864 ret = psp_cmd_submit_buf(psp, NULL, cmd, 865 psp->fence_buf_mc_addr); 866 867 release_psp_cmd_buf(psp); 868 869 return ret; 870 } 871 872 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 873 struct psp_gfx_cmd_resp *cmd) 874 { 875 if (amdgpu_sriov_vf(psp->adev)) 876 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 877 else 878 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 879 } 880 881 static int psp_tmr_unload(struct psp_context *psp) 882 { 883 int ret; 884 struct psp_gfx_cmd_resp *cmd; 885 886 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV, 887 * as TMR is not loaded at all 888 */ 889 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 890 return 0; 891 892 cmd = acquire_psp_cmd_buf(psp); 893 894 psp_prep_tmr_unload_cmd_buf(psp, cmd); 895 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); 896 897 ret = psp_cmd_submit_buf(psp, NULL, cmd, 898 psp->fence_buf_mc_addr); 899 900 release_psp_cmd_buf(psp); 901 902 return ret; 903 } 904 905 static int psp_tmr_terminate(struct psp_context *psp) 906 { 907 return psp_tmr_unload(psp); 908 } 909 910 int psp_get_fw_attestation_records_addr(struct psp_context *psp, 911 uint64_t *output_ptr) 912 { 913 int ret; 914 struct psp_gfx_cmd_resp *cmd; 915 916 if (!output_ptr) 917 return -EINVAL; 918 919 if (amdgpu_sriov_vf(psp->adev)) 920 return 0; 921 922 cmd = acquire_psp_cmd_buf(psp); 923 924 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; 925 926 ret = psp_cmd_submit_buf(psp, NULL, cmd, 927 psp->fence_buf_mc_addr); 928 929 if (!ret) { 930 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + 931 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); 932 } 933 934 release_psp_cmd_buf(psp); 935 936 return ret; 937 } 938 939 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 940 { 941 struct psp_context *psp = &adev->psp; 942 struct psp_gfx_cmd_resp *cmd; 943 int ret; 944 945 if 
(amdgpu_sriov_vf(adev)) 946 return 0; 947 948 cmd = acquire_psp_cmd_buf(psp); 949 950 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 951 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 952 953 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 954 if (!ret) { 955 *boot_cfg = 956 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0; 957 } 958 959 release_psp_cmd_buf(psp); 960 961 return ret; 962 } 963 964 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 965 { 966 int ret; 967 struct psp_context *psp = &adev->psp; 968 struct psp_gfx_cmd_resp *cmd; 969 970 if (amdgpu_sriov_vf(adev)) 971 return 0; 972 973 cmd = acquire_psp_cmd_buf(psp); 974 975 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 976 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 977 cmd->cmd.boot_cfg.boot_config = boot_cfg; 978 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 979 980 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 981 982 release_psp_cmd_buf(psp); 983 984 return ret; 985 } 986 987 static int psp_rl_load(struct amdgpu_device *adev) 988 { 989 int ret; 990 struct psp_context *psp = &adev->psp; 991 struct psp_gfx_cmd_resp *cmd; 992 993 if (!is_psp_fw_valid(psp->rl)) 994 return 0; 995 996 cmd = acquire_psp_cmd_buf(psp); 997 998 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 999 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); 1000 1001 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1002 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); 1003 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); 1004 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; 1005 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; 1006 1007 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1008 1009 release_psp_cmd_buf(psp); 1010 1011 return ret; 1012 } 1013 1014 int psp_spatial_partition(struct psp_context *psp, int mode) 1015 { 1016 struct psp_gfx_cmd_resp *cmd; 1017 int ret; 1018 1019 if (amdgpu_sriov_vf(psp->adev)) 1020 return 0; 1021 1022 cmd = acquire_psp_cmd_buf(psp); 1023 1024 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART; 1025 cmd->cmd.cmd_spatial_part.mode = mode; 1026 1027 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode); 1028 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1029 1030 release_psp_cmd_buf(psp); 1031 1032 return ret; 1033 } 1034 1035 static int psp_asd_initialize(struct psp_context *psp) 1036 { 1037 int ret; 1038 1039 /* If PSP version doesn't match ASD version, asd loading will be failed. 1040 * add workaround to bypass it for sriov now. 
1041 * TODO: add version check to make it common 1042 */ 1043 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) 1044 return 0; 1045 1046 psp->asd_context.mem_context.shared_mc_addr = 0; 1047 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; 1048 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; 1049 1050 ret = psp_ta_load(psp, &psp->asd_context); 1051 if (!ret) 1052 psp->asd_context.initialized = true; 1053 1054 return ret; 1055 } 1056 1057 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1058 uint32_t session_id) 1059 { 1060 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 1061 cmd->cmd.cmd_unload_ta.session_id = session_id; 1062 } 1063 1064 int psp_ta_unload(struct psp_context *psp, struct ta_context *context) 1065 { 1066 int ret; 1067 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1068 1069 psp_prep_ta_unload_cmd_buf(cmd, context->session_id); 1070 1071 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1072 1073 context->resp_status = cmd->resp.status; 1074 1075 release_psp_cmd_buf(psp); 1076 1077 return ret; 1078 } 1079 1080 static int psp_asd_terminate(struct psp_context *psp) 1081 { 1082 int ret; 1083 1084 if (amdgpu_sriov_vf(psp->adev)) 1085 return 0; 1086 1087 if (!psp->asd_context.initialized) 1088 return 0; 1089 1090 ret = psp_ta_unload(psp, &psp->asd_context); 1091 if (!ret) 1092 psp->asd_context.initialized = false; 1093 1094 return ret; 1095 } 1096 1097 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1098 uint32_t id, uint32_t value) 1099 { 1100 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 1101 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 1102 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 1103 } 1104 1105 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 1106 uint32_t value) 1107 { 1108 struct psp_gfx_cmd_resp *cmd; 1109 int ret = 0; 1110 1111 if (reg >= PSP_REG_LAST) 1112 return -EINVAL; 1113 1114 cmd = acquire_psp_cmd_buf(psp); 1115 1116 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 1117 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1118 if (ret) 1119 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); 1120 1121 release_psp_cmd_buf(psp); 1122 1123 return ret; 1124 } 1125 1126 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1127 uint64_t ta_bin_mc, 1128 struct ta_context *context) 1129 { 1130 cmd->cmd_id = context->ta_load_type; 1131 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 1132 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 1133 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; 1134 1135 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 1136 lower_32_bits(context->mem_context.shared_mc_addr); 1137 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 1138 upper_32_bits(context->mem_context.shared_mc_addr); 1139 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; 1140 } 1141 1142 int psp_ta_init_shared_buf(struct psp_context *psp, 1143 struct ta_mem_context *mem_ctx) 1144 { 1145 /* 1146 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1147 * physical) for ta to host memory 1148 */ 1149 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1150 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1151 AMDGPU_GEM_DOMAIN_GTT, 1152 &mem_ctx->shared_bo, 1153 &mem_ctx->shared_mc_addr, 1154 &mem_ctx->shared_buf); 1155 } 1156 1157 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1158 uint32_t ta_cmd_id, 1159 uint32_t session_id) 1160 { 1161 
cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1162 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 1163 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1164 } 1165 1166 int psp_ta_invoke(struct psp_context *psp, 1167 uint32_t ta_cmd_id, 1168 struct ta_context *context) 1169 { 1170 int ret; 1171 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1172 1173 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); 1174 1175 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1176 psp->fence_buf_mc_addr); 1177 1178 context->resp_status = cmd->resp.status; 1179 1180 release_psp_cmd_buf(psp); 1181 1182 return ret; 1183 } 1184 1185 int psp_ta_load(struct psp_context *psp, struct ta_context *context) 1186 { 1187 int ret; 1188 struct psp_gfx_cmd_resp *cmd; 1189 1190 cmd = acquire_psp_cmd_buf(psp); 1191 1192 psp_copy_fw(psp, context->bin_desc.start_addr, 1193 context->bin_desc.size_bytes); 1194 1195 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); 1196 1197 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1198 psp->fence_buf_mc_addr); 1199 1200 context->resp_status = cmd->resp.status; 1201 1202 if (!ret) 1203 context->session_id = cmd->resp.session_id; 1204 1205 release_psp_cmd_buf(psp); 1206 1207 return ret; 1208 } 1209 1210 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1211 { 1212 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); 1213 } 1214 1215 int psp_xgmi_terminate(struct psp_context *psp) 1216 { 1217 int ret; 1218 struct amdgpu_device *adev = psp->adev; 1219 1220 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1221 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 1222 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 1223 adev->gmc.xgmi.connected_to_cpu)) 1224 return 0; 1225 1226 if (!psp->xgmi_context.context.initialized) 1227 return 0; 1228 1229 ret = psp_ta_unload(psp, &psp->xgmi_context.context); 1230 1231 psp->xgmi_context.context.initialized = false; 1232 1233 return ret; 1234 } 1235 1236 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) 1237 { 1238 struct ta_xgmi_shared_memory *xgmi_cmd; 1239 int ret; 1240 1241 if (!psp->ta_fw || 1242 !psp->xgmi_context.context.bin_desc.size_bytes || 1243 !psp->xgmi_context.context.bin_desc.start_addr) 1244 return -ENOENT; 1245 1246 if (!load_ta) 1247 goto invoke; 1248 1249 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; 1250 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1251 1252 if (!psp->xgmi_context.context.mem_context.shared_buf) { 1253 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); 1254 if (ret) 1255 return ret; 1256 } 1257 1258 /* Load XGMI TA */ 1259 ret = psp_ta_load(psp, &psp->xgmi_context.context); 1260 if (!ret) 1261 psp->xgmi_context.context.initialized = true; 1262 else 1263 return ret; 1264 1265 invoke: 1266 /* Initialize XGMI session */ 1267 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); 1268 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1269 xgmi_cmd->flag_extend_link_record = set_extended_data; 1270 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 1271 1272 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1273 /* note down the capbility flag for XGMI TA */ 1274 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag; 1275 1276 return ret; 1277 } 1278 1279 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 1280 { 1281 struct ta_xgmi_shared_memory *xgmi_cmd; 1282 
int ret; 1283 1284 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1285 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1286 1287 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 1288 1289 /* Invoke xgmi ta to get hive id */ 1290 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1291 if (ret) 1292 return ret; 1293 1294 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 1295 1296 return 0; 1297 } 1298 1299 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 1300 { 1301 struct ta_xgmi_shared_memory *xgmi_cmd; 1302 int ret; 1303 1304 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1305 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1306 1307 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 1308 1309 /* Invoke xgmi ta to get the node id */ 1310 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1311 if (ret) 1312 return ret; 1313 1314 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 1315 1316 return 0; 1317 } 1318 1319 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1320 { 1321 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1322 IP_VERSION(13, 0, 2) && 1323 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || 1324 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= 1325 IP_VERSION(13, 0, 6); 1326 } 1327 1328 /* 1329 * Chips that support extended topology information require the driver to 1330 * reflect topology information in the opposite direction. This is 1331 * because the TA has already exceeded its link record limit and if the 1332 * TA holds bi-directional information, the driver would have to do 1333 * multiple fetches instead of just two. 1334 */ 1335 static void psp_xgmi_reflect_topology_info(struct psp_context *psp, 1336 struct psp_xgmi_node_info node_info) 1337 { 1338 struct amdgpu_device *mirror_adev; 1339 struct amdgpu_hive_info *hive; 1340 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; 1341 uint64_t dst_node_id = node_info.node_id; 1342 uint8_t dst_num_hops = node_info.num_hops; 1343 uint8_t dst_num_links = node_info.num_links; 1344 1345 hive = amdgpu_get_xgmi_hive(psp->adev); 1346 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { 1347 struct psp_xgmi_topology_info *mirror_top_info; 1348 int j; 1349 1350 if (mirror_adev->gmc.xgmi.node_id != dst_node_id) 1351 continue; 1352 1353 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; 1354 for (j = 0; j < mirror_top_info->num_nodes; j++) { 1355 if (mirror_top_info->nodes[j].node_id != src_node_id) 1356 continue; 1357 1358 mirror_top_info->nodes[j].num_hops = dst_num_hops; 1359 /* 1360 * prevent 0 num_links value re-reflection since reflection 1361 * criteria is based on num_hops (direct or indirect). 
1362 * 1363 */ 1364 if (dst_num_links) 1365 mirror_top_info->nodes[j].num_links = dst_num_links; 1366 1367 break; 1368 } 1369 1370 break; 1371 } 1372 1373 amdgpu_put_xgmi_hive(hive); 1374 } 1375 1376 int psp_xgmi_get_topology_info(struct psp_context *psp, 1377 int number_devices, 1378 struct psp_xgmi_topology_info *topology, 1379 bool get_extended_data) 1380 { 1381 struct ta_xgmi_shared_memory *xgmi_cmd; 1382 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1383 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 1384 int i; 1385 int ret; 1386 1387 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1388 return -EINVAL; 1389 1390 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1391 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1392 xgmi_cmd->flag_extend_link_record = get_extended_data; 1393 1394 /* Fill in the shared memory with topology information as input */ 1395 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1396 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO; 1397 topology_info_input->num_nodes = number_devices; 1398 1399 for (i = 0; i < topology_info_input->num_nodes; i++) { 1400 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1401 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1402 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 1403 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1404 } 1405 1406 /* Invoke xgmi ta to get the topology information */ 1407 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO); 1408 if (ret) 1409 return ret; 1410 1411 /* Read the output topology information from the shared memory */ 1412 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 1413 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 1414 for (i = 0; i < topology->num_nodes; i++) { 1415 /* extended data will either be 0 or equal to non-extended data */ 1416 if (topology_info_output->nodes[i].num_hops) 1417 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 1418 1419 /* non-extended data gets everything here so no need to update */ 1420 if (!get_extended_data) { 1421 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 1422 topology->nodes[i].is_sharing_enabled = 1423 topology_info_output->nodes[i].is_sharing_enabled; 1424 topology->nodes[i].sdma_engine = 1425 topology_info_output->nodes[i].sdma_engine; 1426 } 1427 1428 } 1429 1430 /* Invoke xgmi ta again to get the link information */ 1431 if (psp_xgmi_peer_link_info_supported(psp)) { 1432 struct ta_xgmi_cmd_get_peer_link_info *link_info_output; 1433 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output; 1434 bool requires_reflection = 1435 (psp->xgmi_context.supports_extended_data && 1436 get_extended_data) || 1437 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1438 IP_VERSION(13, 0, 6); 1439 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : 1440 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; 1441 1442 /* popluate the shared output buffer rather than the cmd input buffer 1443 * with node_ids as the input for GET_PEER_LINKS command execution. 1444 * This is required for GET_PEER_LINKS per xgmi ta implementation. 1445 * The same requirement for GET_EXTEND_PEER_LINKS command. 
1446 */ 1447 if (ta_port_num_support) { 1448 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info; 1449 1450 for (i = 0; i < topology->num_nodes; i++) 1451 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1452 1453 link_extend_info_output->num_nodes = topology->num_nodes; 1454 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS; 1455 } else { 1456 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; 1457 1458 for (i = 0; i < topology->num_nodes; i++) 1459 link_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1460 1461 link_info_output->num_nodes = topology->num_nodes; 1462 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1463 } 1464 1465 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1466 if (ret) 1467 return ret; 1468 1469 for (i = 0; i < topology->num_nodes; i++) { 1470 uint8_t node_num_links = ta_port_num_support ? 1471 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links; 1472 /* accumulate num_links on extended data */ 1473 if (get_extended_data) { 1474 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links; 1475 } else { 1476 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? 1477 topology->nodes[i].num_links : node_num_links; 1478 } 1479 /* popluate the connected port num info if supported and available */ 1480 if (ta_port_num_support && topology->nodes[i].num_links) { 1481 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, 1482 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); 1483 } 1484 1485 /* reflect the topology information for bi-directionality */ 1486 if (requires_reflection && topology->nodes[i].num_hops) 1487 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); 1488 } 1489 } 1490 1491 return 0; 1492 } 1493 1494 int psp_xgmi_set_topology_info(struct psp_context *psp, 1495 int number_devices, 1496 struct psp_xgmi_topology_info *topology) 1497 { 1498 struct ta_xgmi_shared_memory *xgmi_cmd; 1499 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1500 int i; 1501 1502 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1503 return -EINVAL; 1504 1505 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1506 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1507 1508 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1509 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 1510 topology_info_input->num_nodes = number_devices; 1511 1512 for (i = 0; i < topology_info_input->num_nodes; i++) { 1513 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1514 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1515 topology_info_input->nodes[i].is_sharing_enabled = 1; 1516 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1517 } 1518 1519 /* Invoke xgmi ta to set topology information */ 1520 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 1521 } 1522 1523 // ras begin 1524 static void psp_ras_ta_check_status(struct psp_context *psp) 1525 { 1526 struct ta_ras_shared_memory *ras_cmd = 1527 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1528 1529 switch (ras_cmd->ras_status) { 1530 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: 1531 dev_warn(psp->adev->dev, 1532 "RAS WARNING: cmd failed due to unsupported ip\n"); 1533 break; 1534 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: 1535 
dev_warn(psp->adev->dev, 1536 "RAS WARNING: cmd failed due to unsupported error injection\n"); 1537 break; 1538 case TA_RAS_STATUS__SUCCESS: 1539 break; 1540 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: 1541 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) 1542 dev_warn(psp->adev->dev, 1543 "RAS WARNING: Inject error to critical region is not allowed\n"); 1544 break; 1545 default: 1546 dev_warn(psp->adev->dev, 1547 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); 1548 break; 1549 } 1550 } 1551 1552 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1553 { 1554 struct ta_ras_shared_memory *ras_cmd; 1555 int ret; 1556 1557 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1558 1559 /* 1560 * TODO: bypass the loading in sriov for now 1561 */ 1562 if (amdgpu_sriov_vf(psp->adev)) 1563 return 0; 1564 1565 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); 1566 1567 if (amdgpu_ras_intr_triggered()) 1568 return ret; 1569 1570 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { 1571 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); 1572 return -EINVAL; 1573 } 1574 1575 if (!ret) { 1576 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1577 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1578 1579 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1580 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1581 dev_warn(psp->adev->dev, 1582 "RAS internal register access blocked\n"); 1583 1584 psp_ras_ta_check_status(psp); 1585 } 1586 1587 return ret; 1588 } 1589 1590 int psp_ras_enable_features(struct psp_context *psp, 1591 union ta_ras_cmd_input *info, bool enable) 1592 { 1593 struct ta_ras_shared_memory *ras_cmd; 1594 int ret; 1595 1596 if (!psp->ras_context.context.initialized) 1597 return -EINVAL; 1598 1599 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1600 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1601 1602 if (enable) 1603 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 1604 else 1605 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 1606 1607 ras_cmd->ras_in_message = *info; 1608 1609 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1610 if (ret) 1611 return -EINVAL; 1612 1613 return 0; 1614 } 1615 1616 int psp_ras_terminate(struct psp_context *psp) 1617 { 1618 int ret; 1619 1620 /* 1621 * TODO: bypass the terminate in sriov for now 1622 */ 1623 if (amdgpu_sriov_vf(psp->adev)) 1624 return 0; 1625 1626 if (!psp->ras_context.context.initialized) 1627 return 0; 1628 1629 ret = psp_ta_unload(psp, &psp->ras_context.context); 1630 1631 psp->ras_context.context.initialized = false; 1632 1633 return ret; 1634 } 1635 1636 int psp_ras_initialize(struct psp_context *psp) 1637 { 1638 int ret; 1639 uint32_t boot_cfg = 0xFF; 1640 struct amdgpu_device *adev = psp->adev; 1641 struct ta_ras_shared_memory *ras_cmd; 1642 1643 /* 1644 * TODO: bypass the initialize in sriov for now 1645 */ 1646 if (amdgpu_sriov_vf(adev)) 1647 return 0; 1648 1649 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1650 !adev->psp.ras_context.context.bin_desc.start_addr) { 1651 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1652 return 0; 1653 } 1654 1655 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1656 /* query GECC enablement status from boot config 1657 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1658 */ 1659 ret = psp_boot_config_get(adev, &boot_cfg); 1660 if (ret) 1661 
dev_warn(adev->dev, "PSP get boot config failed\n"); 1662 1663 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { 1664 if (!boot_cfg) { 1665 dev_info(adev->dev, "GECC is disabled\n"); 1666 } else { 1667 /* disable GECC in next boot cycle if ras is 1668 * disabled by module parameter amdgpu_ras_enable 1669 * and/or amdgpu_ras_mask, or boot_config_get call 1670 * is failed 1671 */ 1672 ret = psp_boot_config_set(adev, 0); 1673 if (ret) 1674 dev_warn(adev->dev, "PSP set boot config failed\n"); 1675 else 1676 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1677 } 1678 } else { 1679 if (boot_cfg == 1) { 1680 dev_info(adev->dev, "GECC is enabled\n"); 1681 } else { 1682 /* enable GECC in next boot cycle if it is disabled 1683 * in boot config, or force enable GECC if failed to 1684 * get boot configuration 1685 */ 1686 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1687 if (ret) 1688 dev_warn(adev->dev, "PSP set boot config failed\n"); 1689 else 1690 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1691 } 1692 } 1693 } 1694 1695 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1696 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1697 1698 if (!psp->ras_context.context.mem_context.shared_buf) { 1699 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1700 if (ret) 1701 return ret; 1702 } 1703 1704 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1705 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1706 1707 if (amdgpu_ras_is_poison_mode_supported(adev)) 1708 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1709 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1710 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1711 ras_cmd->ras_in_message.init_flags.xcc_mask = 1712 adev->gfx.xcc_mask; 1713 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1714 1715 ret = psp_ta_load(psp, &psp->ras_context.context); 1716 1717 if (!ret && !ras_cmd->ras_status) 1718 psp->ras_context.context.initialized = true; 1719 else { 1720 if (ras_cmd->ras_status) 1721 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1722 1723 /* fail to load RAS TA */ 1724 psp->ras_context.context.initialized = false; 1725 } 1726 1727 return ret; 1728 } 1729 1730 int psp_ras_trigger_error(struct psp_context *psp, 1731 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 1732 { 1733 struct ta_ras_shared_memory *ras_cmd; 1734 struct amdgpu_device *adev = psp->adev; 1735 int ret; 1736 uint32_t dev_mask; 1737 1738 if (!psp->ras_context.context.initialized) 1739 return -EINVAL; 1740 1741 switch (info->block_id) { 1742 case TA_RAS_BLOCK__GFX: 1743 dev_mask = GET_MASK(GC, instance_mask); 1744 break; 1745 case TA_RAS_BLOCK__SDMA: 1746 dev_mask = GET_MASK(SDMA0, instance_mask); 1747 break; 1748 case TA_RAS_BLOCK__VCN: 1749 case TA_RAS_BLOCK__JPEG: 1750 dev_mask = GET_MASK(VCN, instance_mask); 1751 break; 1752 default: 1753 dev_mask = instance_mask; 1754 break; 1755 } 1756 1757 /* reuse sub_block_index for backward compatibility */ 1758 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 1759 dev_mask &= AMDGPU_RAS_INST_MASK; 1760 info->sub_block_index |= dev_mask; 1761 1762 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1763 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1764 1765 ras_cmd->cmd_id = 
TA_RAS_COMMAND__TRIGGER_ERROR; 1766 ras_cmd->ras_in_message.trigger_error = *info; 1767 1768 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1769 if (ret) 1770 return -EINVAL; 1771 1772 /* If err_event_athub occurs error inject was successful, however 1773 * return status from TA is no long reliable 1774 */ 1775 if (amdgpu_ras_intr_triggered()) 1776 return 0; 1777 1778 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 1779 return -EACCES; 1780 else if (ras_cmd->ras_status) 1781 return -EINVAL; 1782 1783 return 0; 1784 } 1785 // ras end 1786 1787 // HDCP start 1788 static int psp_hdcp_initialize(struct psp_context *psp) 1789 { 1790 int ret; 1791 1792 /* 1793 * TODO: bypass the initialize in sriov for now 1794 */ 1795 if (amdgpu_sriov_vf(psp->adev)) 1796 return 0; 1797 1798 if (!psp->hdcp_context.context.bin_desc.size_bytes || 1799 !psp->hdcp_context.context.bin_desc.start_addr) { 1800 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1801 return 0; 1802 } 1803 1804 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 1805 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1806 1807 if (!psp->hdcp_context.context.mem_context.shared_buf) { 1808 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 1809 if (ret) 1810 return ret; 1811 } 1812 1813 ret = psp_ta_load(psp, &psp->hdcp_context.context); 1814 if (!ret) { 1815 psp->hdcp_context.context.initialized = true; 1816 mutex_init(&psp->hdcp_context.mutex); 1817 } 1818 1819 return ret; 1820 } 1821 1822 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1823 { 1824 /* 1825 * TODO: bypass the loading in sriov for now 1826 */ 1827 if (amdgpu_sriov_vf(psp->adev)) 1828 return 0; 1829 1830 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 1831 } 1832 1833 static int psp_hdcp_terminate(struct psp_context *psp) 1834 { 1835 int ret; 1836 1837 /* 1838 * TODO: bypass the terminate in sriov for now 1839 */ 1840 if (amdgpu_sriov_vf(psp->adev)) 1841 return 0; 1842 1843 if (!psp->hdcp_context.context.initialized) 1844 return 0; 1845 1846 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 1847 1848 psp->hdcp_context.context.initialized = false; 1849 1850 return ret; 1851 } 1852 // HDCP end 1853 1854 // DTM start 1855 static int psp_dtm_initialize(struct psp_context *psp) 1856 { 1857 int ret; 1858 1859 /* 1860 * TODO: bypass the initialize in sriov for now 1861 */ 1862 if (amdgpu_sriov_vf(psp->adev)) 1863 return 0; 1864 1865 if (!psp->dtm_context.context.bin_desc.size_bytes || 1866 !psp->dtm_context.context.bin_desc.start_addr) { 1867 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1868 return 0; 1869 } 1870 1871 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 1872 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1873 1874 if (!psp->dtm_context.context.mem_context.shared_buf) { 1875 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 1876 if (ret) 1877 return ret; 1878 } 1879 1880 ret = psp_ta_load(psp, &psp->dtm_context.context); 1881 if (!ret) { 1882 psp->dtm_context.context.initialized = true; 1883 mutex_init(&psp->dtm_context.mutex); 1884 } 1885 1886 return ret; 1887 } 1888 1889 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1890 { 1891 /* 1892 * TODO: bypass the loading in sriov for now 1893 */ 1894 if (amdgpu_sriov_vf(psp->adev)) 1895 return 0; 1896 1897 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 
1898 } 1899 1900 static int psp_dtm_terminate(struct psp_context *psp) 1901 { 1902 int ret; 1903 1904 /* 1905 * TODO: bypass the terminate in sriov for now 1906 */ 1907 if (amdgpu_sriov_vf(psp->adev)) 1908 return 0; 1909 1910 if (!psp->dtm_context.context.initialized) 1911 return 0; 1912 1913 ret = psp_ta_unload(psp, &psp->dtm_context.context); 1914 1915 psp->dtm_context.context.initialized = false; 1916 1917 return ret; 1918 } 1919 // DTM end 1920 1921 // RAP start 1922 static int psp_rap_initialize(struct psp_context *psp) 1923 { 1924 int ret; 1925 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 1926 1927 /* 1928 * TODO: bypass the initialize in sriov for now 1929 */ 1930 if (amdgpu_sriov_vf(psp->adev)) 1931 return 0; 1932 1933 if (!psp->rap_context.context.bin_desc.size_bytes || 1934 !psp->rap_context.context.bin_desc.start_addr) { 1935 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1936 return 0; 1937 } 1938 1939 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 1940 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1941 1942 if (!psp->rap_context.context.mem_context.shared_buf) { 1943 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 1944 if (ret) 1945 return ret; 1946 } 1947 1948 ret = psp_ta_load(psp, &psp->rap_context.context); 1949 if (!ret) { 1950 psp->rap_context.context.initialized = true; 1951 mutex_init(&psp->rap_context.mutex); 1952 } else 1953 return ret; 1954 1955 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 1956 if (ret || status != TA_RAP_STATUS__SUCCESS) { 1957 psp_rap_terminate(psp); 1958 /* free rap shared memory */ 1959 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 1960 1961 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 1962 ret, status); 1963 1964 return ret; 1965 } 1966 1967 return 0; 1968 } 1969 1970 static int psp_rap_terminate(struct psp_context *psp) 1971 { 1972 int ret; 1973 1974 if (!psp->rap_context.context.initialized) 1975 return 0; 1976 1977 ret = psp_ta_unload(psp, &psp->rap_context.context); 1978 1979 psp->rap_context.context.initialized = false; 1980 1981 return ret; 1982 } 1983 1984 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 1985 { 1986 struct ta_rap_shared_memory *rap_cmd; 1987 int ret = 0; 1988 1989 if (!psp->rap_context.context.initialized) 1990 return 0; 1991 1992 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 1993 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 1994 return -EINVAL; 1995 1996 mutex_lock(&psp->rap_context.mutex); 1997 1998 rap_cmd = (struct ta_rap_shared_memory *) 1999 psp->rap_context.context.mem_context.shared_buf; 2000 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2001 2002 rap_cmd->cmd_id = ta_cmd_id; 2003 rap_cmd->validation_method_id = METHOD_A; 2004 2005 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2006 if (ret) 2007 goto out_unlock; 2008 2009 if (status) 2010 *status = rap_cmd->rap_status; 2011 2012 out_unlock: 2013 mutex_unlock(&psp->rap_context.mutex); 2014 2015 return ret; 2016 } 2017 // RAP end 2018 2019 /* securedisplay start */ 2020 static int psp_securedisplay_initialize(struct psp_context *psp) 2021 { 2022 int ret; 2023 struct ta_securedisplay_cmd *securedisplay_cmd; 2024 2025 /* 2026 * TODO: bypass the initialize in sriov for now 2027 */ 2028 if (amdgpu_sriov_vf(psp->adev)) 2029 return 0; 2030 2031 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2032 
!psp->securedisplay_context.context.bin_desc.start_addr) { 2033 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2034 return 0; 2035 } 2036 2037 psp->securedisplay_context.context.mem_context.shared_mem_size = 2038 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2039 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2040 2041 if (!psp->securedisplay_context.context.initialized) { 2042 ret = psp_ta_init_shared_buf(psp, 2043 &psp->securedisplay_context.context.mem_context); 2044 if (ret) 2045 return ret; 2046 } 2047 2048 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2049 if (!ret) { 2050 psp->securedisplay_context.context.initialized = true; 2051 mutex_init(&psp->securedisplay_context.mutex); 2052 } else 2053 return ret; 2054 2055 mutex_lock(&psp->securedisplay_context.mutex); 2056 2057 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2058 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2059 2060 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2061 2062 mutex_unlock(&psp->securedisplay_context.mutex); 2063 2064 if (ret) { 2065 psp_securedisplay_terminate(psp); 2066 /* free securedisplay shared memory */ 2067 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2068 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2069 return -EINVAL; 2070 } 2071 2072 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2073 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2074 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2075 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2076 /* don't try again */ 2077 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2078 } 2079 2080 return 0; 2081 } 2082 2083 static int psp_securedisplay_terminate(struct psp_context *psp) 2084 { 2085 int ret; 2086 2087 /* 2088 * TODO:bypass the terminate in sriov for now 2089 */ 2090 if (amdgpu_sriov_vf(psp->adev)) 2091 return 0; 2092 2093 if (!psp->securedisplay_context.context.initialized) 2094 return 0; 2095 2096 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2097 2098 psp->securedisplay_context.context.initialized = false; 2099 2100 return ret; 2101 } 2102 2103 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2104 { 2105 int ret; 2106 2107 if (!psp->securedisplay_context.context.initialized) 2108 return -EINVAL; 2109 2110 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2111 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) 2112 return -EINVAL; 2113 2114 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2115 2116 return ret; 2117 } 2118 /* SECUREDISPLAY end */ 2119 2120 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2121 { 2122 struct psp_context *psp = &adev->psp; 2123 int ret = 0; 2124 2125 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2126 ret = psp->funcs->wait_for_bootloader(psp); 2127 2128 return ret; 2129 } 2130 2131 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2132 { 2133 if (psp->funcs && 2134 psp->funcs->get_ras_capability) { 2135 return psp->funcs->get_ras_capability(psp); 2136 } else { 2137 return false; 2138 } 2139 } 2140 2141 static int psp_hw_start(struct psp_context *psp) 2142 { 2143 struct amdgpu_device *adev = psp->adev; 2144 int ret; 2145 2146 if (!amdgpu_sriov_vf(adev)) { 2147 if ((is_psp_fw_valid(psp->kdb)) && 2148 (psp->funcs->bootloader_load_kdb != 
NULL)) { 2149 ret = psp_bootloader_load_kdb(psp); 2150 if (ret) { 2151 dev_err(adev->dev, "PSP load kdb failed!\n"); 2152 return ret; 2153 } 2154 } 2155 2156 if ((is_psp_fw_valid(psp->spl)) && 2157 (psp->funcs->bootloader_load_spl != NULL)) { 2158 ret = psp_bootloader_load_spl(psp); 2159 if (ret) { 2160 dev_err(adev->dev, "PSP load spl failed!\n"); 2161 return ret; 2162 } 2163 } 2164 2165 if ((is_psp_fw_valid(psp->sys)) && 2166 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2167 ret = psp_bootloader_load_sysdrv(psp); 2168 if (ret) { 2169 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2170 return ret; 2171 } 2172 } 2173 2174 if ((is_psp_fw_valid(psp->soc_drv)) && 2175 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2176 ret = psp_bootloader_load_soc_drv(psp); 2177 if (ret) { 2178 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2179 return ret; 2180 } 2181 } 2182 2183 if ((is_psp_fw_valid(psp->intf_drv)) && 2184 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2185 ret = psp_bootloader_load_intf_drv(psp); 2186 if (ret) { 2187 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2188 return ret; 2189 } 2190 } 2191 2192 if ((is_psp_fw_valid(psp->dbg_drv)) && 2193 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2194 ret = psp_bootloader_load_dbg_drv(psp); 2195 if (ret) { 2196 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2197 return ret; 2198 } 2199 } 2200 2201 if ((is_psp_fw_valid(psp->ras_drv)) && 2202 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2203 ret = psp_bootloader_load_ras_drv(psp); 2204 if (ret) { 2205 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2206 return ret; 2207 } 2208 } 2209 2210 if ((is_psp_fw_valid(psp->sos)) && 2211 (psp->funcs->bootloader_load_sos != NULL)) { 2212 ret = psp_bootloader_load_sos(psp); 2213 if (ret) { 2214 dev_err(adev->dev, "PSP load sos failed!\n"); 2215 return ret; 2216 } 2217 } 2218 } 2219 2220 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2221 if (ret) { 2222 dev_err(adev->dev, "PSP create ring failed!\n"); 2223 return ret; 2224 } 2225 2226 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2227 goto skip_pin_bo; 2228 2229 if (!psp_boottime_tmr(psp)) { 2230 ret = psp_tmr_init(psp); 2231 if (ret) { 2232 dev_err(adev->dev, "PSP tmr init failed!\n"); 2233 return ret; 2234 } 2235 } 2236 2237 skip_pin_bo: 2238 /* 2239 * For ASICs with DF Cstate management centralized 2240 * to PMFW, TMR setup should be performed after PMFW 2241 * loaded and before other non-psp firmware loaded. 
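 * In practice this means psp_load_smu_fw() runs here, ahead of
 * psp_tmr_load(), while the remaining non-psp firmware is loaded later
 * from psp_load_non_psp_fw().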
2242 */ 2243 if (psp->pmfw_centralized_cstate_management) { 2244 ret = psp_load_smu_fw(psp); 2245 if (ret) 2246 return ret; 2247 } 2248 2249 ret = psp_tmr_load(psp); 2250 if (ret) { 2251 dev_err(adev->dev, "PSP load tmr failed!\n"); 2252 return ret; 2253 } 2254 2255 return 0; 2256 } 2257 2258 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2259 enum psp_gfx_fw_type *type) 2260 { 2261 switch (ucode->ucode_id) { 2262 case AMDGPU_UCODE_ID_CAP: 2263 *type = GFX_FW_TYPE_CAP; 2264 break; 2265 case AMDGPU_UCODE_ID_SDMA0: 2266 *type = GFX_FW_TYPE_SDMA0; 2267 break; 2268 case AMDGPU_UCODE_ID_SDMA1: 2269 *type = GFX_FW_TYPE_SDMA1; 2270 break; 2271 case AMDGPU_UCODE_ID_SDMA2: 2272 *type = GFX_FW_TYPE_SDMA2; 2273 break; 2274 case AMDGPU_UCODE_ID_SDMA3: 2275 *type = GFX_FW_TYPE_SDMA3; 2276 break; 2277 case AMDGPU_UCODE_ID_SDMA4: 2278 *type = GFX_FW_TYPE_SDMA4; 2279 break; 2280 case AMDGPU_UCODE_ID_SDMA5: 2281 *type = GFX_FW_TYPE_SDMA5; 2282 break; 2283 case AMDGPU_UCODE_ID_SDMA6: 2284 *type = GFX_FW_TYPE_SDMA6; 2285 break; 2286 case AMDGPU_UCODE_ID_SDMA7: 2287 *type = GFX_FW_TYPE_SDMA7; 2288 break; 2289 case AMDGPU_UCODE_ID_CP_MES: 2290 *type = GFX_FW_TYPE_CP_MES; 2291 break; 2292 case AMDGPU_UCODE_ID_CP_MES_DATA: 2293 *type = GFX_FW_TYPE_MES_STACK; 2294 break; 2295 case AMDGPU_UCODE_ID_CP_MES1: 2296 *type = GFX_FW_TYPE_CP_MES_KIQ; 2297 break; 2298 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2299 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2300 break; 2301 case AMDGPU_UCODE_ID_CP_CE: 2302 *type = GFX_FW_TYPE_CP_CE; 2303 break; 2304 case AMDGPU_UCODE_ID_CP_PFP: 2305 *type = GFX_FW_TYPE_CP_PFP; 2306 break; 2307 case AMDGPU_UCODE_ID_CP_ME: 2308 *type = GFX_FW_TYPE_CP_ME; 2309 break; 2310 case AMDGPU_UCODE_ID_CP_MEC1: 2311 *type = GFX_FW_TYPE_CP_MEC; 2312 break; 2313 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2314 *type = GFX_FW_TYPE_CP_MEC_ME1; 2315 break; 2316 case AMDGPU_UCODE_ID_CP_MEC2: 2317 *type = GFX_FW_TYPE_CP_MEC; 2318 break; 2319 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2320 *type = GFX_FW_TYPE_CP_MEC_ME2; 2321 break; 2322 case AMDGPU_UCODE_ID_RLC_P: 2323 *type = GFX_FW_TYPE_RLC_P; 2324 break; 2325 case AMDGPU_UCODE_ID_RLC_V: 2326 *type = GFX_FW_TYPE_RLC_V; 2327 break; 2328 case AMDGPU_UCODE_ID_RLC_G: 2329 *type = GFX_FW_TYPE_RLC_G; 2330 break; 2331 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2332 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2333 break; 2334 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2335 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2336 break; 2337 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2338 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2339 break; 2340 case AMDGPU_UCODE_ID_RLC_IRAM: 2341 *type = GFX_FW_TYPE_RLC_IRAM; 2342 break; 2343 case AMDGPU_UCODE_ID_RLC_DRAM: 2344 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2345 break; 2346 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2347 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2348 break; 2349 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2350 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2351 break; 2352 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2353 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2354 break; 2355 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2356 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2357 break; 2358 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2359 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2360 break; 2361 case AMDGPU_UCODE_ID_SMC: 2362 *type = GFX_FW_TYPE_SMU; 2363 break; 2364 case AMDGPU_UCODE_ID_PPTABLE: 2365 *type = GFX_FW_TYPE_PPTABLE; 2366 break; 2367 case AMDGPU_UCODE_ID_UVD: 2368 *type = GFX_FW_TYPE_UVD; 2369 break; 2370 case AMDGPU_UCODE_ID_UVD1: 2371 *type = GFX_FW_TYPE_UVD1; 2372 break; 
2373 case AMDGPU_UCODE_ID_VCE: 2374 *type = GFX_FW_TYPE_VCE; 2375 break; 2376 case AMDGPU_UCODE_ID_VCN: 2377 *type = GFX_FW_TYPE_VCN; 2378 break; 2379 case AMDGPU_UCODE_ID_VCN1: 2380 *type = GFX_FW_TYPE_VCN1; 2381 break; 2382 case AMDGPU_UCODE_ID_DMCU_ERAM: 2383 *type = GFX_FW_TYPE_DMCU_ERAM; 2384 break; 2385 case AMDGPU_UCODE_ID_DMCU_INTV: 2386 *type = GFX_FW_TYPE_DMCU_ISR; 2387 break; 2388 case AMDGPU_UCODE_ID_VCN0_RAM: 2389 *type = GFX_FW_TYPE_VCN0_RAM; 2390 break; 2391 case AMDGPU_UCODE_ID_VCN1_RAM: 2392 *type = GFX_FW_TYPE_VCN1_RAM; 2393 break; 2394 case AMDGPU_UCODE_ID_DMCUB: 2395 *type = GFX_FW_TYPE_DMUB; 2396 break; 2397 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2398 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2399 break; 2400 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2401 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2402 break; 2403 case AMDGPU_UCODE_ID_IMU_I: 2404 *type = GFX_FW_TYPE_IMU_I; 2405 break; 2406 case AMDGPU_UCODE_ID_IMU_D: 2407 *type = GFX_FW_TYPE_IMU_D; 2408 break; 2409 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2410 *type = GFX_FW_TYPE_RS64_PFP; 2411 break; 2412 case AMDGPU_UCODE_ID_CP_RS64_ME: 2413 *type = GFX_FW_TYPE_RS64_ME; 2414 break; 2415 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2416 *type = GFX_FW_TYPE_RS64_MEC; 2417 break; 2418 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2419 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2420 break; 2421 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2422 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2423 break; 2424 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2425 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2426 break; 2427 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2428 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2429 break; 2430 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2431 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2432 break; 2433 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2434 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2435 break; 2436 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2437 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2438 break; 2439 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2440 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2441 break; 2442 case AMDGPU_UCODE_ID_VPE_CTX: 2443 *type = GFX_FW_TYPE_VPEC_FW1; 2444 break; 2445 case AMDGPU_UCODE_ID_VPE_CTL: 2446 *type = GFX_FW_TYPE_VPEC_FW2; 2447 break; 2448 case AMDGPU_UCODE_ID_VPE: 2449 *type = GFX_FW_TYPE_VPE; 2450 break; 2451 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2452 *type = GFX_FW_TYPE_UMSCH_UCODE; 2453 break; 2454 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2455 *type = GFX_FW_TYPE_UMSCH_DATA; 2456 break; 2457 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2458 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2459 break; 2460 case AMDGPU_UCODE_ID_P2S_TABLE: 2461 *type = GFX_FW_TYPE_P2S_TABLE; 2462 break; 2463 case AMDGPU_UCODE_ID_MAXIMUM: 2464 default: 2465 return -EINVAL; 2466 } 2467 2468 return 0; 2469 } 2470 2471 static void psp_print_fw_hdr(struct psp_context *psp, 2472 struct amdgpu_firmware_info *ucode) 2473 { 2474 struct amdgpu_device *adev = psp->adev; 2475 struct common_firmware_header *hdr; 2476 2477 switch (ucode->ucode_id) { 2478 case AMDGPU_UCODE_ID_SDMA0: 2479 case AMDGPU_UCODE_ID_SDMA1: 2480 case AMDGPU_UCODE_ID_SDMA2: 2481 case AMDGPU_UCODE_ID_SDMA3: 2482 case AMDGPU_UCODE_ID_SDMA4: 2483 case AMDGPU_UCODE_ID_SDMA5: 2484 case AMDGPU_UCODE_ID_SDMA6: 2485 case AMDGPU_UCODE_ID_SDMA7: 2486 hdr = (struct common_firmware_header *) 2487 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2488 amdgpu_ucode_print_sdma_hdr(hdr); 2489 break; 2490 case AMDGPU_UCODE_ID_CP_CE: 2491 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2492 
amdgpu_ucode_print_gfx_hdr(hdr); 2493 break; 2494 case AMDGPU_UCODE_ID_CP_PFP: 2495 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2496 amdgpu_ucode_print_gfx_hdr(hdr); 2497 break; 2498 case AMDGPU_UCODE_ID_CP_ME: 2499 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2500 amdgpu_ucode_print_gfx_hdr(hdr); 2501 break; 2502 case AMDGPU_UCODE_ID_CP_MEC1: 2503 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2504 amdgpu_ucode_print_gfx_hdr(hdr); 2505 break; 2506 case AMDGPU_UCODE_ID_RLC_G: 2507 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2508 amdgpu_ucode_print_rlc_hdr(hdr); 2509 break; 2510 case AMDGPU_UCODE_ID_SMC: 2511 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2512 amdgpu_ucode_print_smc_hdr(hdr); 2513 break; 2514 default: 2515 break; 2516 } 2517 } 2518 2519 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2520 struct amdgpu_firmware_info *ucode, 2521 struct psp_gfx_cmd_resp *cmd) 2522 { 2523 int ret; 2524 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2525 2526 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2527 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2528 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2529 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2530 2531 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2532 if (ret) 2533 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2534 2535 return ret; 2536 } 2537 2538 int psp_execute_ip_fw_load(struct psp_context *psp, 2539 struct amdgpu_firmware_info *ucode) 2540 { 2541 int ret = 0; 2542 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2543 2544 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2545 if (!ret) { 2546 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2547 psp->fence_buf_mc_addr); 2548 } 2549 2550 release_psp_cmd_buf(psp); 2551 2552 return ret; 2553 } 2554 2555 static int psp_load_p2s_table(struct psp_context *psp) 2556 { 2557 int ret; 2558 struct amdgpu_device *adev = psp->adev; 2559 struct amdgpu_firmware_info *ucode = 2560 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2561 2562 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2563 return 0; 2564 2565 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { 2566 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2567 0x0036003C; 2568 if (psp->sos.fw_version < supp_vers) 2569 return 0; 2570 } 2571 2572 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2573 return 0; 2574 2575 ret = psp_execute_ip_fw_load(psp, ucode); 2576 2577 return ret; 2578 } 2579 2580 static int psp_load_smu_fw(struct psp_context *psp) 2581 { 2582 int ret; 2583 struct amdgpu_device *adev = psp->adev; 2584 struct amdgpu_firmware_info *ucode = 2585 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2586 struct amdgpu_ras *ras = psp->ras_context.ras; 2587 2588 /* 2589 * Skip SMU FW reloading in case of using BACO for runpm only, 2590 * as SMU is always alive. 
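 * (psp_load_p2s_table() above applies the same runpm/BACO check.)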
2591 */ 2592 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2593 return 0; 2594 2595 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2596 return 0; 2597 2598 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2599 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2600 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2601 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2602 if (ret) 2603 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2604 } 2605 2606 ret = psp_execute_ip_fw_load(psp, ucode); 2607 2608 if (ret) 2609 dev_err(adev->dev, "PSP load smu failed!\n"); 2610 2611 return ret; 2612 } 2613 2614 static bool fw_load_skip_check(struct psp_context *psp, 2615 struct amdgpu_firmware_info *ucode) 2616 { 2617 if (!ucode->fw || !ucode->ucode_size) 2618 return true; 2619 2620 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2621 return true; 2622 2623 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2624 (psp_smu_reload_quirk(psp) || 2625 psp->autoload_supported || 2626 psp->pmfw_centralized_cstate_management)) 2627 return true; 2628 2629 if (amdgpu_sriov_vf(psp->adev) && 2630 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2631 return true; 2632 2633 if (psp->autoload_supported && 2634 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2635 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2636 /* skip mec JT when autoload is enabled */ 2637 return true; 2638 2639 return false; 2640 } 2641 2642 int psp_load_fw_list(struct psp_context *psp, 2643 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2644 { 2645 int ret = 0, i; 2646 struct amdgpu_firmware_info *ucode; 2647 2648 for (i = 0; i < ucode_count; ++i) { 2649 ucode = ucode_list[i]; 2650 psp_print_fw_hdr(psp, ucode); 2651 ret = psp_execute_ip_fw_load(psp, ucode); 2652 if (ret) 2653 return ret; 2654 } 2655 return ret; 2656 } 2657 2658 static int psp_load_non_psp_fw(struct psp_context *psp) 2659 { 2660 int i, ret; 2661 struct amdgpu_firmware_info *ucode; 2662 struct amdgpu_device *adev = psp->adev; 2663 2664 if (psp->autoload_supported && 2665 !psp->pmfw_centralized_cstate_management) { 2666 ret = psp_load_smu_fw(psp); 2667 if (ret) 2668 return ret; 2669 } 2670 2671 /* Load P2S table first if it's available */ 2672 psp_load_p2s_table(psp); 2673 2674 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2675 ucode = &adev->firmware.ucode[i]; 2676 2677 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2678 !fw_load_skip_check(psp, ucode)) { 2679 ret = psp_load_smu_fw(psp); 2680 if (ret) 2681 return ret; 2682 continue; 2683 } 2684 2685 if (fw_load_skip_check(psp, ucode)) 2686 continue; 2687 2688 if (psp->autoload_supported && 2689 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2690 IP_VERSION(11, 0, 7) || 2691 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2692 IP_VERSION(11, 0, 11) || 2693 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2694 IP_VERSION(11, 0, 12)) && 2695 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2696 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2697 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2698 /* PSP only receive one SDMA fw for sienna_cichlid, 2699 * as all four sdma fw are same 2700 */ 2701 continue; 2702 2703 psp_print_fw_hdr(psp, ucode); 2704 2705 ret = psp_execute_ip_fw_load(psp, ucode); 2706 if (ret) 2707 return ret; 2708 2709 /* Start rlc autoload after psp recieved all the gfx firmware */ 2710 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 
2711 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 2712 ret = psp_rlc_autoload_start(psp); 2713 if (ret) { 2714 dev_err(adev->dev, "Failed to start rlc autoload\n"); 2715 return ret; 2716 } 2717 } 2718 } 2719 2720 return 0; 2721 } 2722 2723 static int psp_load_fw(struct amdgpu_device *adev) 2724 { 2725 int ret; 2726 struct psp_context *psp = &adev->psp; 2727 2728 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2729 /* should not destroy ring, only stop */ 2730 psp_ring_stop(psp, PSP_RING_TYPE__KM); 2731 } else { 2732 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2733 2734 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2735 if (ret) { 2736 dev_err(adev->dev, "PSP ring init failed!\n"); 2737 goto failed; 2738 } 2739 } 2740 2741 ret = psp_hw_start(psp); 2742 if (ret) 2743 goto failed; 2744 2745 ret = psp_load_non_psp_fw(psp); 2746 if (ret) 2747 goto failed1; 2748 2749 ret = psp_asd_initialize(psp); 2750 if (ret) { 2751 dev_err(adev->dev, "PSP load asd failed!\n"); 2752 goto failed1; 2753 } 2754 2755 ret = psp_rl_load(adev); 2756 if (ret) { 2757 dev_err(adev->dev, "PSP load RL failed!\n"); 2758 goto failed1; 2759 } 2760 2761 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2762 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2763 ret = psp_xgmi_initialize(psp, false, true); 2764 /* Warning the XGMI seesion initialize failure 2765 * Instead of stop driver initialization 2766 */ 2767 if (ret) 2768 dev_err(psp->adev->dev, 2769 "XGMI: Failed to initialize XGMI session\n"); 2770 } 2771 } 2772 2773 if (psp->ta_fw) { 2774 ret = psp_ras_initialize(psp); 2775 if (ret) 2776 dev_err(psp->adev->dev, 2777 "RAS: Failed to initialize RAS\n"); 2778 2779 ret = psp_hdcp_initialize(psp); 2780 if (ret) 2781 dev_err(psp->adev->dev, 2782 "HDCP: Failed to initialize HDCP\n"); 2783 2784 ret = psp_dtm_initialize(psp); 2785 if (ret) 2786 dev_err(psp->adev->dev, 2787 "DTM: Failed to initialize DTM\n"); 2788 2789 ret = psp_rap_initialize(psp); 2790 if (ret) 2791 dev_err(psp->adev->dev, 2792 "RAP: Failed to initialize RAP\n"); 2793 2794 ret = psp_securedisplay_initialize(psp); 2795 if (ret) 2796 dev_err(psp->adev->dev, 2797 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 2798 } 2799 2800 return 0; 2801 2802 failed1: 2803 psp_free_shared_bufs(psp); 2804 failed: 2805 /* 2806 * all cleanup jobs (xgmi terminate, ras terminate, 2807 * ring destroy, cmd/fence/fw buffers destory, 2808 * psp->cmd destory) are delayed to psp_hw_fini 2809 */ 2810 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2811 return ret; 2812 } 2813 2814 static int psp_hw_init(void *handle) 2815 { 2816 int ret; 2817 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2818 2819 mutex_lock(&adev->firmware.mutex); 2820 /* 2821 * This sequence is just used on hw_init only once, no need on 2822 * resume. 
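 * (psp_resume() does not call amdgpu_ucode_init_bo() again and reuses
 * the firmware buffers set up here.)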
2823 */ 2824 ret = amdgpu_ucode_init_bo(adev); 2825 if (ret) 2826 goto failed; 2827 2828 ret = psp_load_fw(adev); 2829 if (ret) { 2830 dev_err(adev->dev, "PSP firmware loading failed\n"); 2831 goto failed; 2832 } 2833 2834 mutex_unlock(&adev->firmware.mutex); 2835 return 0; 2836 2837 failed: 2838 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2839 mutex_unlock(&adev->firmware.mutex); 2840 return -EINVAL; 2841 } 2842 2843 static int psp_hw_fini(void *handle) 2844 { 2845 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2846 struct psp_context *psp = &adev->psp; 2847 2848 if (psp->ta_fw) { 2849 psp_ras_terminate(psp); 2850 psp_securedisplay_terminate(psp); 2851 psp_rap_terminate(psp); 2852 psp_dtm_terminate(psp); 2853 psp_hdcp_terminate(psp); 2854 2855 if (adev->gmc.xgmi.num_physical_nodes > 1) 2856 psp_xgmi_terminate(psp); 2857 } 2858 2859 psp_asd_terminate(psp); 2860 psp_tmr_terminate(psp); 2861 2862 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2863 2864 return 0; 2865 } 2866 2867 static int psp_suspend(void *handle) 2868 { 2869 int ret = 0; 2870 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2871 struct psp_context *psp = &adev->psp; 2872 2873 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2874 psp->xgmi_context.context.initialized) { 2875 ret = psp_xgmi_terminate(psp); 2876 if (ret) { 2877 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 2878 goto out; 2879 } 2880 } 2881 2882 if (psp->ta_fw) { 2883 ret = psp_ras_terminate(psp); 2884 if (ret) { 2885 dev_err(adev->dev, "Failed to terminate ras ta\n"); 2886 goto out; 2887 } 2888 ret = psp_hdcp_terminate(psp); 2889 if (ret) { 2890 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 2891 goto out; 2892 } 2893 ret = psp_dtm_terminate(psp); 2894 if (ret) { 2895 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 2896 goto out; 2897 } 2898 ret = psp_rap_terminate(psp); 2899 if (ret) { 2900 dev_err(adev->dev, "Failed to terminate rap ta\n"); 2901 goto out; 2902 } 2903 ret = psp_securedisplay_terminate(psp); 2904 if (ret) { 2905 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 2906 goto out; 2907 } 2908 } 2909 2910 ret = psp_asd_terminate(psp); 2911 if (ret) { 2912 dev_err(adev->dev, "Failed to terminate asd\n"); 2913 goto out; 2914 } 2915 2916 ret = psp_tmr_terminate(psp); 2917 if (ret) { 2918 dev_err(adev->dev, "Failed to terminate tmr\n"); 2919 goto out; 2920 } 2921 2922 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2923 if (ret) 2924 dev_err(adev->dev, "PSP ring stop failed\n"); 2925 2926 out: 2927 return ret; 2928 } 2929 2930 static int psp_resume(void *handle) 2931 { 2932 int ret; 2933 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2934 struct psp_context *psp = &adev->psp; 2935 2936 dev_info(adev->dev, "PSP is resuming...\n"); 2937 2938 if (psp->mem_train_ctx.enable_mem_training) { 2939 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2940 if (ret) { 2941 dev_err(adev->dev, "Failed to process memory training!\n"); 2942 return ret; 2943 } 2944 } 2945 2946 mutex_lock(&adev->firmware.mutex); 2947 2948 ret = psp_hw_start(psp); 2949 if (ret) 2950 goto failed; 2951 2952 ret = psp_load_non_psp_fw(psp); 2953 if (ret) 2954 goto failed; 2955 2956 ret = psp_asd_initialize(psp); 2957 if (ret) { 2958 dev_err(adev->dev, "PSP load asd failed!\n"); 2959 goto failed; 2960 } 2961 2962 ret = psp_rl_load(adev); 2963 if (ret) { 2964 dev_err(adev->dev, "PSP load RL failed!\n"); 2965 goto failed; 2966 } 2967 2968 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2969 ret = psp_xgmi_initialize(psp, false, true); 
2970 /* Warning the XGMI seesion initialize failure 2971 * Instead of stop driver initialization 2972 */ 2973 if (ret) 2974 dev_err(psp->adev->dev, 2975 "XGMI: Failed to initialize XGMI session\n"); 2976 } 2977 2978 if (psp->ta_fw) { 2979 ret = psp_ras_initialize(psp); 2980 if (ret) 2981 dev_err(psp->adev->dev, 2982 "RAS: Failed to initialize RAS\n"); 2983 2984 ret = psp_hdcp_initialize(psp); 2985 if (ret) 2986 dev_err(psp->adev->dev, 2987 "HDCP: Failed to initialize HDCP\n"); 2988 2989 ret = psp_dtm_initialize(psp); 2990 if (ret) 2991 dev_err(psp->adev->dev, 2992 "DTM: Failed to initialize DTM\n"); 2993 2994 ret = psp_rap_initialize(psp); 2995 if (ret) 2996 dev_err(psp->adev->dev, 2997 "RAP: Failed to initialize RAP\n"); 2998 2999 ret = psp_securedisplay_initialize(psp); 3000 if (ret) 3001 dev_err(psp->adev->dev, 3002 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3003 } 3004 3005 mutex_unlock(&adev->firmware.mutex); 3006 3007 return 0; 3008 3009 failed: 3010 dev_err(adev->dev, "PSP resume failed\n"); 3011 mutex_unlock(&adev->firmware.mutex); 3012 return ret; 3013 } 3014 3015 int psp_gpu_reset(struct amdgpu_device *adev) 3016 { 3017 int ret; 3018 3019 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3020 return 0; 3021 3022 mutex_lock(&adev->psp.mutex); 3023 ret = psp_mode1_reset(&adev->psp); 3024 mutex_unlock(&adev->psp.mutex); 3025 3026 return ret; 3027 } 3028 3029 int psp_rlc_autoload_start(struct psp_context *psp) 3030 { 3031 int ret; 3032 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3033 3034 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3035 3036 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3037 psp->fence_buf_mc_addr); 3038 3039 release_psp_cmd_buf(psp); 3040 3041 return ret; 3042 } 3043 3044 int psp_ring_cmd_submit(struct psp_context *psp, 3045 uint64_t cmd_buf_mc_addr, 3046 uint64_t fence_mc_addr, 3047 int index) 3048 { 3049 unsigned int psp_write_ptr_reg = 0; 3050 struct psp_gfx_rb_frame *write_frame; 3051 struct psp_ring *ring = &psp->km_ring; 3052 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3053 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3054 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3055 struct amdgpu_device *adev = psp->adev; 3056 uint32_t ring_size_dw = ring->ring_size / 4; 3057 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3058 3059 /* KM (GPCOM) prepare write pointer */ 3060 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3061 3062 /* Update KM RB frame pointer to new frame */ 3063 /* write_frame ptr increments by size of rb_frame in bytes */ 3064 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3065 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3066 write_frame = ring_buffer_start; 3067 else 3068 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3069 /* Check invalid write_frame ptr address */ 3070 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3071 dev_err(adev->dev, 3072 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3073 ring_buffer_start, ring_buffer_end, write_frame); 3074 dev_err(adev->dev, 3075 "write_frame is pointing to address out of bounds\n"); 3076 return -EINVAL; 3077 } 3078 3079 /* Initialize KM RB frame */ 3080 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3081 3082 /* Update KM RB frame */ 3083 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3084 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3085 write_frame->fence_addr_hi = 
upper_32_bits(fence_mc_addr); 3086 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3087 write_frame->fence_value = index; 3088 amdgpu_device_flush_hdp(adev, NULL); 3089 3090 /* Update the write Pointer in DWORDs */ 3091 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3092 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3093 return 0; 3094 } 3095 3096 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3097 { 3098 struct amdgpu_device *adev = psp->adev; 3099 char fw_name[PSP_FW_NAME_LEN]; 3100 const struct psp_firmware_header_v1_0 *asd_hdr; 3101 int err = 0; 3102 3103 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 3104 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); 3105 if (err) 3106 goto out; 3107 3108 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3109 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3110 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3111 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3112 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3113 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3114 return 0; 3115 out: 3116 amdgpu_ucode_release(&adev->psp.asd_fw); 3117 return err; 3118 } 3119 3120 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3121 { 3122 struct amdgpu_device *adev = psp->adev; 3123 char fw_name[PSP_FW_NAME_LEN]; 3124 const struct psp_firmware_header_v1_0 *toc_hdr; 3125 int err = 0; 3126 3127 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); 3128 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); 3129 if (err) 3130 goto out; 3131 3132 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3133 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3134 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3135 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3136 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3137 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3138 return 0; 3139 out: 3140 amdgpu_ucode_release(&adev->psp.toc_fw); 3141 return err; 3142 } 3143 3144 static int parse_sos_bin_descriptor(struct psp_context *psp, 3145 const struct psp_fw_bin_desc *desc, 3146 const struct psp_firmware_header_v2_0 *sos_hdr) 3147 { 3148 uint8_t *ucode_start_addr = NULL; 3149 3150 if (!psp || !desc || !sos_hdr) 3151 return -EINVAL; 3152 3153 ucode_start_addr = (uint8_t *)sos_hdr + 3154 le32_to_cpu(desc->offset_bytes) + 3155 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3156 3157 switch (desc->fw_type) { 3158 case PSP_FW_TYPE_PSP_SOS: 3159 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3160 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3161 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3162 psp->sos.start_addr = ucode_start_addr; 3163 break; 3164 case PSP_FW_TYPE_PSP_SYS_DRV: 3165 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3166 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3167 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3168 psp->sys.start_addr = ucode_start_addr; 3169 break; 3170 case PSP_FW_TYPE_PSP_KDB: 3171 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3172 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3173 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3174 
psp->kdb.start_addr = ucode_start_addr; 3175 break; 3176 case PSP_FW_TYPE_PSP_TOC: 3177 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3178 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3179 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3180 psp->toc.start_addr = ucode_start_addr; 3181 break; 3182 case PSP_FW_TYPE_PSP_SPL: 3183 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3184 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3185 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3186 psp->spl.start_addr = ucode_start_addr; 3187 break; 3188 case PSP_FW_TYPE_PSP_RL: 3189 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3190 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3191 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3192 psp->rl.start_addr = ucode_start_addr; 3193 break; 3194 case PSP_FW_TYPE_PSP_SOC_DRV: 3195 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3196 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3197 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3198 psp->soc_drv.start_addr = ucode_start_addr; 3199 break; 3200 case PSP_FW_TYPE_PSP_INTF_DRV: 3201 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3202 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3203 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3204 psp->intf_drv.start_addr = ucode_start_addr; 3205 break; 3206 case PSP_FW_TYPE_PSP_DBG_DRV: 3207 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3208 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3209 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3210 psp->dbg_drv.start_addr = ucode_start_addr; 3211 break; 3212 case PSP_FW_TYPE_PSP_RAS_DRV: 3213 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3214 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3215 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3216 psp->ras_drv.start_addr = ucode_start_addr; 3217 break; 3218 default: 3219 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3220 break; 3221 } 3222 3223 return 0; 3224 } 3225 3226 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3227 { 3228 const struct psp_firmware_header_v1_0 *sos_hdr; 3229 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3230 uint8_t *ucode_array_start_addr; 3231 3232 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3233 ucode_array_start_addr = (uint8_t *)sos_hdr + 3234 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3235 3236 if (adev->gmc.xgmi.connected_to_cpu || 3237 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3238 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3239 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3240 3241 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3242 adev->psp.sys.start_addr = ucode_array_start_addr; 3243 3244 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3245 adev->psp.sos.start_addr = ucode_array_start_addr + 3246 le32_to_cpu(sos_hdr->sos.offset_bytes); 3247 } else { 3248 /* Load alternate PSP SOS FW */ 3249 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3250 3251 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3252 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3253 3254 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3255 
adev->psp.sys.start_addr = ucode_array_start_addr + 3256 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3257 3258 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3259 adev->psp.sos.start_addr = ucode_array_start_addr + 3260 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3261 } 3262 3263 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3264 dev_warn(adev->dev, "PSP SOS FW not available"); 3265 return -EINVAL; 3266 } 3267 3268 return 0; 3269 } 3270 3271 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3272 { 3273 struct amdgpu_device *adev = psp->adev; 3274 char fw_name[PSP_FW_NAME_LEN]; 3275 const struct psp_firmware_header_v1_0 *sos_hdr; 3276 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3277 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3278 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3279 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3280 int err = 0; 3281 uint8_t *ucode_array_start_addr; 3282 int fw_index = 0; 3283 3284 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 3285 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); 3286 if (err) 3287 goto out; 3288 3289 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3290 ucode_array_start_addr = (uint8_t *)sos_hdr + 3291 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3292 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3293 3294 switch (sos_hdr->header.header_version_major) { 3295 case 1: 3296 err = psp_init_sos_base_fw(adev); 3297 if (err) 3298 goto out; 3299 3300 if (sos_hdr->header.header_version_minor == 1) { 3301 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3302 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3303 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3304 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3305 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3306 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3307 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3308 } 3309 if (sos_hdr->header.header_version_minor == 2) { 3310 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3311 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3312 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3313 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3314 } 3315 if (sos_hdr->header.header_version_minor == 3) { 3316 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3317 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3318 adev->psp.toc.start_addr = ucode_array_start_addr + 3319 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3320 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3321 adev->psp.kdb.start_addr = ucode_array_start_addr + 3322 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3323 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3324 adev->psp.spl.start_addr = ucode_array_start_addr + 3325 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3326 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3327 adev->psp.rl.start_addr = ucode_array_start_addr + 3328 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3329 } 3330 break; 3331 case 2: 3332 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3333 3334 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= 
UCODE_MAX_PSP_PACKAGING) { 3335 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3336 err = -EINVAL; 3337 goto out; 3338 } 3339 3340 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { 3341 err = parse_sos_bin_descriptor(psp, 3342 &sos_hdr_v2_0->psp_fw_bin[fw_index], 3343 sos_hdr_v2_0); 3344 if (err) 3345 goto out; 3346 } 3347 break; 3348 default: 3349 dev_err(adev->dev, 3350 "unsupported psp sos firmware\n"); 3351 err = -EINVAL; 3352 goto out; 3353 } 3354 3355 return 0; 3356 out: 3357 amdgpu_ucode_release(&adev->psp.sos_fw); 3358 3359 return err; 3360 } 3361 3362 static int parse_ta_bin_descriptor(struct psp_context *psp, 3363 const struct psp_fw_bin_desc *desc, 3364 const struct ta_firmware_header_v2_0 *ta_hdr) 3365 { 3366 uint8_t *ucode_start_addr = NULL; 3367 3368 if (!psp || !desc || !ta_hdr) 3369 return -EINVAL; 3370 3371 ucode_start_addr = (uint8_t *)ta_hdr + 3372 le32_to_cpu(desc->offset_bytes) + 3373 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3374 3375 switch (desc->fw_type) { 3376 case TA_FW_TYPE_PSP_ASD: 3377 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3378 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3379 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3380 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3381 break; 3382 case TA_FW_TYPE_PSP_XGMI: 3383 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3384 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3385 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3386 break; 3387 case TA_FW_TYPE_PSP_RAS: 3388 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3389 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3390 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3391 break; 3392 case TA_FW_TYPE_PSP_HDCP: 3393 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3394 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3395 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3396 break; 3397 case TA_FW_TYPE_PSP_DTM: 3398 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3399 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3400 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3401 break; 3402 case TA_FW_TYPE_PSP_RAP: 3403 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3404 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3405 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3406 break; 3407 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3408 psp->securedisplay_context.context.bin_desc.fw_version = 3409 le32_to_cpu(desc->fw_version); 3410 psp->securedisplay_context.context.bin_desc.size_bytes = 3411 le32_to_cpu(desc->size_bytes); 3412 psp->securedisplay_context.context.bin_desc.start_addr = 3413 ucode_start_addr; 3414 break; 3415 default: 3416 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3417 break; 3418 } 3419 3420 return 0; 3421 } 3422 3423 static int parse_ta_v1_microcode(struct psp_context *psp) 3424 { 3425 const struct ta_firmware_header_v1_0 *ta_hdr; 3426 struct amdgpu_device *adev = psp->adev; 3427 3428 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3429 3430 if 
(le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3431 return -EINVAL; 3432 3433 adev->psp.xgmi_context.context.bin_desc.fw_version = 3434 le32_to_cpu(ta_hdr->xgmi.fw_version); 3435 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3436 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3437 adev->psp.xgmi_context.context.bin_desc.start_addr = 3438 (uint8_t *)ta_hdr + 3439 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3440 3441 adev->psp.ras_context.context.bin_desc.fw_version = 3442 le32_to_cpu(ta_hdr->ras.fw_version); 3443 adev->psp.ras_context.context.bin_desc.size_bytes = 3444 le32_to_cpu(ta_hdr->ras.size_bytes); 3445 adev->psp.ras_context.context.bin_desc.start_addr = 3446 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3447 le32_to_cpu(ta_hdr->ras.offset_bytes); 3448 3449 adev->psp.hdcp_context.context.bin_desc.fw_version = 3450 le32_to_cpu(ta_hdr->hdcp.fw_version); 3451 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3452 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3453 adev->psp.hdcp_context.context.bin_desc.start_addr = 3454 (uint8_t *)ta_hdr + 3455 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3456 3457 adev->psp.dtm_context.context.bin_desc.fw_version = 3458 le32_to_cpu(ta_hdr->dtm.fw_version); 3459 adev->psp.dtm_context.context.bin_desc.size_bytes = 3460 le32_to_cpu(ta_hdr->dtm.size_bytes); 3461 adev->psp.dtm_context.context.bin_desc.start_addr = 3462 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3463 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3464 3465 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3466 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3467 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3468 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3469 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3470 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3471 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3472 3473 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3474 3475 return 0; 3476 } 3477 3478 static int parse_ta_v2_microcode(struct psp_context *psp) 3479 { 3480 const struct ta_firmware_header_v2_0 *ta_hdr; 3481 struct amdgpu_device *adev = psp->adev; 3482 int err = 0; 3483 int ta_index = 0; 3484 3485 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3486 3487 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3488 return -EINVAL; 3489 3490 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3491 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3492 return -EINVAL; 3493 } 3494 3495 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3496 err = parse_ta_bin_descriptor(psp, 3497 &ta_hdr->ta_fw_bin[ta_index], 3498 ta_hdr); 3499 if (err) 3500 return err; 3501 } 3502 3503 return 0; 3504 } 3505 3506 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3507 { 3508 const struct common_firmware_header *hdr; 3509 struct amdgpu_device *adev = psp->adev; 3510 char fw_name[PSP_FW_NAME_LEN]; 3511 int err; 3512 3513 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 3514 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name); 3515 if (err) 3516 return err; 3517 3518 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3519 switch (le16_to_cpu(hdr->header_version_major)) { 3520 case 1: 3521 err = parse_ta_v1_microcode(psp); 3522 break; 3523 case 2: 3524 err = parse_ta_v2_microcode(psp); 3525 break; 3526 default: 3527 
dev_err(adev->dev, "unsupported TA header version\n"); 3528 err = -EINVAL; 3529 } 3530 3531 if (err) 3532 amdgpu_ucode_release(&adev->psp.ta_fw); 3533 3534 return err; 3535 } 3536 3537 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3538 { 3539 struct amdgpu_device *adev = psp->adev; 3540 char fw_name[PSP_FW_NAME_LEN]; 3541 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3542 struct amdgpu_firmware_info *info = NULL; 3543 int err = 0; 3544 3545 if (!amdgpu_sriov_vf(adev)) { 3546 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3547 return -EINVAL; 3548 } 3549 3550 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); 3551 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name); 3552 if (err) { 3553 if (err == -ENODEV) { 3554 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3555 err = 0; 3556 goto out; 3557 } 3558 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3559 } 3560 3561 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3562 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3563 info->fw = adev->psp.cap_fw; 3564 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3565 adev->psp.cap_fw->data; 3566 adev->firmware.fw_size += ALIGN( 3567 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3568 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3569 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3570 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3571 3572 return 0; 3573 3574 out: 3575 amdgpu_ucode_release(&adev->psp.cap_fw); 3576 return err; 3577 } 3578 3579 static int psp_set_clockgating_state(void *handle, 3580 enum amd_clockgating_state state) 3581 { 3582 return 0; 3583 } 3584 3585 static int psp_set_powergating_state(void *handle, 3586 enum amd_powergating_state state) 3587 { 3588 return 0; 3589 } 3590 3591 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3592 struct device_attribute *attr, 3593 char *buf) 3594 { 3595 struct drm_device *ddev = dev_get_drvdata(dev); 3596 struct amdgpu_device *adev = drm_to_adev(ddev); 3597 uint32_t fw_ver; 3598 int ret; 3599 3600 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3601 dev_info(adev->dev, "PSP block is not ready yet\n."); 3602 return -EBUSY; 3603 } 3604 3605 mutex_lock(&adev->psp.mutex); 3606 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3607 mutex_unlock(&adev->psp.mutex); 3608 3609 if (ret) { 3610 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); 3611 return ret; 3612 } 3613 3614 return sysfs_emit(buf, "%x\n", fw_ver); 3615 } 3616 3617 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3618 struct device_attribute *attr, 3619 const char *buf, 3620 size_t count) 3621 { 3622 struct drm_device *ddev = dev_get_drvdata(dev); 3623 struct amdgpu_device *adev = drm_to_adev(ddev); 3624 int ret, idx; 3625 char fw_name[100]; 3626 const struct firmware *usbc_pd_fw; 3627 struct amdgpu_bo *fw_buf_bo = NULL; 3628 uint64_t fw_pri_mc_addr; 3629 void *fw_pri_cpu_addr; 3630 3631 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3632 dev_err(adev->dev, "PSP block is not ready yet."); 3633 return -EBUSY; 3634 } 3635 3636 if (!drm_dev_enter(ddev, &idx)) 3637 return -ENODEV; 3638 3639 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 3640 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 3641 if (ret) 3642 goto fail; 3643 3644 /* LFB address which is aligned to 1MB boundary per 
PSP request */ 3645 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3646 AMDGPU_GEM_DOMAIN_VRAM | 3647 AMDGPU_GEM_DOMAIN_GTT, 3648 &fw_buf_bo, &fw_pri_mc_addr, 3649 &fw_pri_cpu_addr); 3650 if (ret) 3651 goto rel_buf; 3652 3653 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3654 3655 mutex_lock(&adev->psp.mutex); 3656 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3657 mutex_unlock(&adev->psp.mutex); 3658 3659 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3660 3661 rel_buf: 3662 release_firmware(usbc_pd_fw); 3663 fail: 3664 if (ret) { 3665 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); 3666 count = ret; 3667 } 3668 3669 drm_dev_exit(idx); 3670 return count; 3671 } 3672 3673 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3674 { 3675 int idx; 3676 3677 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3678 return; 3679 3680 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3681 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3682 3683 drm_dev_exit(idx); 3684 } 3685 3686 /** 3687 * DOC: usbc_pd_fw 3688 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 3689 * this file will trigger the update process. 3690 */ 3691 static DEVICE_ATTR(usbc_pd_fw, 0644, 3692 psp_usbc_pd_fw_sysfs_read, 3693 psp_usbc_pd_fw_sysfs_write); 3694 3695 int is_psp_fw_valid(struct psp_bin_desc bin) 3696 { 3697 return bin.size_bytes; 3698 } 3699 3700 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3701 struct bin_attribute *bin_attr, 3702 char *buffer, loff_t pos, size_t count) 3703 { 3704 struct device *dev = kobj_to_dev(kobj); 3705 struct drm_device *ddev = dev_get_drvdata(dev); 3706 struct amdgpu_device *adev = drm_to_adev(ddev); 3707 3708 adev->psp.vbflash_done = false; 3709 3710 /* Safeguard against memory drain */ 3711 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3712 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 3713 kvfree(adev->psp.vbflash_tmp_buf); 3714 adev->psp.vbflash_tmp_buf = NULL; 3715 adev->psp.vbflash_image_size = 0; 3716 return -ENOMEM; 3717 } 3718 3719 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3720 if (!adev->psp.vbflash_tmp_buf) { 3721 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3722 if (!adev->psp.vbflash_tmp_buf) 3723 return -ENOMEM; 3724 } 3725 3726 mutex_lock(&adev->psp.mutex); 3727 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 3728 adev->psp.vbflash_image_size += count; 3729 mutex_unlock(&adev->psp.mutex); 3730 3731 dev_dbg(adev->dev, "IFWI staged for update\n"); 3732 3733 return count; 3734 } 3735 3736 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 3737 struct bin_attribute *bin_attr, char *buffer, 3738 loff_t pos, size_t count) 3739 { 3740 struct device *dev = kobj_to_dev(kobj); 3741 struct drm_device *ddev = dev_get_drvdata(dev); 3742 struct amdgpu_device *adev = drm_to_adev(ddev); 3743 struct amdgpu_bo *fw_buf_bo = NULL; 3744 uint64_t fw_pri_mc_addr; 3745 void *fw_pri_cpu_addr; 3746 int ret; 3747 3748 if (adev->psp.vbflash_image_size == 0) 3749 return -EINVAL; 3750 3751 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); 3752 3753 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 3754 AMDGPU_GPU_PAGE_SIZE, 3755 AMDGPU_GEM_DOMAIN_VRAM, 3756 &fw_buf_bo, 3757 &fw_pri_mc_addr, 3758 &fw_pri_cpu_addr); 3759 if (ret) 3760 goto rel_buf; 3761 3762 
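	/* Copy the staged IFWI image into the VRAM bo and hand it to the PSP
	 * to program the SPI ROM.
	 */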
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
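/*
 * Example flash flow (illustrative only: the sysfs path assumes the GPU is
 * card0 and the IFWI image name is hypothetical; the attributes are only
 * visible when IFWI update is supported):
 *
 *   # stage the IFWI image
 *   cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash
 *
 *   # trigger the SPIROM update with the staged image
 *   cat /sys/class/drm/card0/device/psp_vbflash
 *
 *   # poll the result: 0x1 means the flash completed
 *   cat /sys/class/drm/card0/device/psp_vbflash_status
 */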
static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};